azul_webrender/renderer/
mod.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5//! The high-level module responsible for interfacing with the GPU.
6//!
7//! Much of WebRender's design is driven by separating work into different
8//! threads. To avoid the complexities of multi-threaded GPU access, we restrict
9//! all communication with the GPU to one thread, the render thread. But since
10//! issuing GPU commands is often a bottleneck, we move everything else (i.e.
11//! the computation of what commands to issue) to another thread, the
12//! RenderBackend thread. The RenderBackend, in turn, may delegate work to other
//! threads (like the SceneBuilder threads or Rayon workers), but the
14//! Render-vs-RenderBackend distinction is the most important.
15//!
16//! The consumer is responsible for initializing the render thread before
17//! calling into WebRender, which means that this module also serves as the
18//! initial entry point into WebRender, and is responsible for spawning the
19//! various other threads discussed above. That said, WebRender initialization
20//! returns both the `Renderer` instance as well as a channel for communicating
21//! directly with the `RenderBackend`. Aside from a few high-level operations
22//! like 'render now', most of interesting commands from the consumer go over
23//! that channel and operate on the `RenderBackend`.
24//!
25//! ## Space conversion guidelines
//! At this stage, we should be operating with `DevicePixel` and `FramebufferPixel` only.
//! "Framebuffer" space represents the final destination of our rendering,
//! and it happens to be Y-flipped on OpenGL. The conversion is done as follows:
//!   - for rasterized primitives, the orthographic projection transforms
30//! the content rectangle to -1 to 1
31//!   - the viewport transformation is setup to map the whole range to
32//! the framebuffer rectangle provided by the document view, stored in `DrawTarget`
33//!   - all the direct framebuffer operations, like blitting, reading pixels, and setting
34//! up the scissor, are accepting already transformed coordinates, which we can get by
35//! calling `DrawTarget::to_framebuffer_rect`
36
37use api::{BlobImageHandler, ColorF, ColorU, MixBlendMode};
38use api::{DocumentId, Epoch, ExternalImageHandler};
39use api::CrashAnnotator;
40#[cfg(feature = "replay")]
41use api::ExternalImageId;
42use api::{ExternalImageSource, ExternalImageType, FontRenderMode, ImageFormat};
43use api::{PipelineId, ImageRendering, Checkpoint, NotificationRequest};
44use api::{VoidPtrToSizeFn, PremultipliedColorF};
45use api::{RenderNotifier, ImageBufferKind, SharedFontInstanceMap};
46#[cfg(feature = "replay")]
47use api::ExternalImage;
48use api::units::*;
49use api::channel::{unbounded_channel, Receiver};
50pub use api::DebugFlags;
51use core::time::Duration;
52
53use crate::render_api::{RenderApiSender, DebugCommand, FrameMsg, MemoryReport};
54use crate::batch::{AlphaBatchContainer, BatchKind, BatchFeatures, BatchTextures, BrushBatchKind, ClipBatchList};
55#[cfg(any(feature = "capture", feature = "replay"))]
56use crate::capture::{CaptureConfig, ExternalCaptureImage, PlainExternalImage};
57use crate::composite::{CompositeState, CompositeTileSurface, ResolvedExternalSurface, CompositorSurfaceTransform};
58use crate::composite::{CompositorKind, Compositor, NativeTileId, CompositeFeatures, CompositeSurfaceFormat, ResolvedExternalSurfaceColorData};
59use crate::composite::{CompositorConfig, NativeSurfaceOperationDetails, NativeSurfaceId, NativeSurfaceOperation};
60use crate::composite::TileKind;
61use crate::c_str;
62use crate::debug_colors;
63use crate::device::{DepthFunction, Device, DrawTarget, ExternalTexture, GpuFrameId};
64use crate::device::{ProgramCache, ReadTarget, ShaderError, Texture, TextureFilter, TextureFlags, TextureSlot};
65use crate::device::{UploadMethod, UploadPBOPool, VertexUsageHint};
66use crate::device::query::{GpuSampler, GpuTimer};
67#[cfg(feature = "capture")]
68use crate::device::FBOId;
69use crate::debug_item::DebugItem;
70use crate::frame_builder::{Frame, ChasePrimitive, FrameBuilderConfig};
71use crate::glyph_cache::GlyphCache;
72use crate::glyph_rasterizer::{GlyphFormat, GlyphRasterizer};
73use crate::gpu_cache::{GpuCacheUpdate, GpuCacheUpdateList};
74use crate::gpu_cache::{GpuCacheDebugChunk, GpuCacheDebugCmd};
75use crate::gpu_types::{PrimitiveInstanceData, ScalingInstance, SvgFilterInstance};
76use crate::gpu_types::{BlurInstance, ClearInstance, CompositeInstance, ZBufferId, CompositorTransform};
77use crate::internal_types::{TextureSource, ResourceCacheError, TextureCacheCategory};
78#[cfg(any(feature = "capture", feature = "replay"))]
79use crate::internal_types::DebugOutput;
80use crate::internal_types::{CacheTextureId, FastHashMap, FastHashSet, RenderedDocument, ResultMsg};
81use crate::internal_types::{TextureCacheAllocInfo, TextureCacheAllocationKind, TextureUpdateList};
82use crate::internal_types::{RenderTargetInfo, Swizzle, DeferredResolveIndex};
83use crate::picture::{self, ResolvedSurfaceTexture};
84use crate::prim_store::DeferredResolve;
85use crate::profiler::{self, GpuProfileTag, TransactionProfile};
86use crate::profiler::{Profiler, add_event_marker, add_text_marker, thread_is_being_profiled};
87use crate::device::query::{GpuProfiler, GpuDebugMethod};
88use crate::render_backend::{FrameId, RenderBackend};
89use crate::render_task_graph::RenderTaskGraph;
90use crate::render_task::{RenderTask, RenderTaskKind, ReadbackTask};
91use crate::resource_cache::ResourceCache;
92use crate::scene_builder_thread::{SceneBuilderThread, SceneBuilderThreadChannels, LowPrioritySceneBuilderThread};
93use crate::screen_capture::AsyncScreenshotGrabber;
94use crate::render_target::{AlphaRenderTarget, ColorRenderTarget, PictureCacheTarget};
95use crate::render_target::{RenderTarget, TextureCacheRenderTarget};
96use crate::render_target::{RenderTargetKind, BlitJob};
97use crate::texture_cache::{TextureCache, TextureCacheConfig};
98use crate::tile_cache::PictureCacheDebugInfo;
99use crate::util::drain_filter;
100use crate::rectangle_occlusion as occlusion;
101use upload::{upload_to_texture_cache, UploadTexturePool};
102
103use euclid::{rect, Transform3D, Scale, default};
104use gl_context_loader::gl;
105use malloc_size_of::MallocSizeOfOps;
106use rayon::{ThreadPool, ThreadPoolBuilder};
107
108use std::{
109    cell::RefCell,
110    collections::VecDeque,
111    f32,
112    ffi::c_void,
113    mem,
114    num::NonZeroUsize,
115    path::PathBuf,
116    rc::Rc,
117    sync::Arc,
118    sync::atomic::{AtomicBool, Ordering},
119    thread,
120};
121#[cfg(any(feature = "capture", feature = "replay"))]
122use std::collections::hash_map::Entry;
123use tracy_rs::register_thread_with_profiler;
124use time::precise_time_ns;
125
126mod debug;
127mod gpu_cache;
128mod shade;
129mod vertex;
130mod upload;
131
132pub use debug::DebugRenderer;
133pub use shade::{Shaders, SharedShaders};
134pub use vertex::{desc, VertexArrayKind, MAX_VERTEX_TEXTURE_WIDTH};
135
/// Use this hint for all vertex data re-initialization. This allows
/// the driver to better re-use RBOs internally.
pub const ONE_TIME_USAGE_HINT: VertexUsageHint = VertexUsageHint::Stream;

/// Is only false if no WR instances have ever been created.
// NOTE(review): presumably set to true during Renderer initialization —
// the write site is not visible in this chunk.
static HAS_BEEN_INITIALIZED: AtomicBool = AtomicBool::new(false);

/// Returns true if a WR instance has ever been initialized in this process.
pub fn wr_has_been_initialized() -> bool {
    HAS_BEEN_INITIALIZED.load(Ordering::SeqCst)
}

/// The size of the array of each type of vertex data texture that
/// is round-robin-ed each frame during bind_frame_data. Doing this
/// helps avoid driver stalls while updating the texture in some
/// drivers. The size of these textures are typically very small
/// (e.g. < 16 kB) so it's not a huge waste of memory. Despite that,
/// this is a short-term solution - we want to find a better way
/// to provide this frame data, which will likely involve some
/// combination of UBO/SSBO usage. Although this only affects some
/// platforms, it's enabled on all platforms to reduce testing
/// differences between platforms.
pub const VERTEX_DATA_TEXTURE_COUNT: usize = 3;

/// Number of GPU blocks per UV rectangle provided for an image.
pub const BLOCKS_PER_UV_RECT: usize = 2;
162
// GPU profiler tags. The `label` identifies the workload in profiler output
// and `color` is used when the timings are visualized. `GPU_TAG_*` tags mark
// individual draw-call categories; `GPU_SAMPLER_TAG_*` tags mark whole passes.

// Tags for brush primitive batches.
const GPU_TAG_BRUSH_OPACITY: GpuProfileTag = GpuProfileTag {
    label: "B_Opacity",
    color: debug_colors::DARKMAGENTA,
};
const GPU_TAG_BRUSH_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "B_LinearGradient",
    color: debug_colors::POWDERBLUE,
};
const GPU_TAG_BRUSH_YUV_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_YuvImage",
    color: debug_colors::DARKGREEN,
};
const GPU_TAG_BRUSH_MIXBLEND: GpuProfileTag = GpuProfileTag {
    label: "B_MixBlend",
    color: debug_colors::MAGENTA,
};
const GPU_TAG_BRUSH_BLEND: GpuProfileTag = GpuProfileTag {
    label: "B_Blend",
    color: debug_colors::ORANGE,
};
const GPU_TAG_BRUSH_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_Image",
    color: debug_colors::SPRINGGREEN,
};
const GPU_TAG_BRUSH_SOLID: GpuProfileTag = GpuProfileTag {
    label: "B_Solid",
    color: debug_colors::RED,
};

// Tags for cached render tasks (clips, borders, gradients, ...).
const GPU_TAG_CACHE_CLIP: GpuProfileTag = GpuProfileTag {
    label: "C_Clip",
    color: debug_colors::PURPLE,
};
const GPU_TAG_CACHE_BORDER: GpuProfileTag = GpuProfileTag {
    label: "C_Border",
    color: debug_colors::CORNSILK,
};
const GPU_TAG_CACHE_LINE_DECORATION: GpuProfileTag = GpuProfileTag {
    label: "C_LineDecoration",
    color: debug_colors::YELLOWGREEN,
};
const GPU_TAG_CACHE_FAST_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_FastLinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CACHE_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_LinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CACHE_RADIAL_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_RadialGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CACHE_CONIC_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_ConicGradient",
    color: debug_colors::BROWN,
};

// Tags for per-target setup work.
const GPU_TAG_SETUP_TARGET: GpuProfileTag = GpuProfileTag {
    label: "target init",
    color: debug_colors::SLATEGREY,
};
const GPU_TAG_SETUP_DATA: GpuProfileTag = GpuProfileTag {
    label: "data init",
    color: debug_colors::LIGHTGREY,
};

// Tags for the remaining primitive / utility draw categories.
const GPU_TAG_PRIM_SPLIT_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "SplitComposite",
    color: debug_colors::DARKBLUE,
};
const GPU_TAG_PRIM_TEXT_RUN: GpuProfileTag = GpuProfileTag {
    label: "TextRun",
    color: debug_colors::BLUE,
};
const GPU_TAG_BLUR: GpuProfileTag = GpuProfileTag {
    label: "Blur",
    color: debug_colors::VIOLET,
};
const GPU_TAG_BLIT: GpuProfileTag = GpuProfileTag {
    label: "Blit",
    color: debug_colors::LIME,
};
const GPU_TAG_SCALE: GpuProfileTag = GpuProfileTag {
    label: "Scale",
    color: debug_colors::GHOSTWHITE,
};

// Whole-pass sampler tags.
const GPU_SAMPLER_TAG_ALPHA: GpuProfileTag = GpuProfileTag {
    label: "Alpha targets",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_OPAQUE: GpuProfileTag = GpuProfileTag {
    label: "Opaque pass",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_TRANSPARENT: GpuProfileTag = GpuProfileTag {
    label: "Transparent pass",
    color: debug_colors::BLACK,
};
const GPU_TAG_SVG_FILTER: GpuProfileTag = GpuProfileTag {
    label: "SvgFilter",
    color: debug_colors::LEMONCHIFFON,
};
const GPU_TAG_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "Composite",
    color: debug_colors::TOMATO,
};
const GPU_TAG_CLEAR: GpuProfileTag = GpuProfileTag {
    label: "Clear",
    color: debug_colors::CHOCOLATE,
};

/// The clear color used for the texture cache when the debug display is enabled.
/// We use a shade of blue so that we can still identify completely blue items in
/// the texture cache.
pub const TEXTURE_CACHE_DBG_CLEAR_COLOR: [f32; 4] = [0.0, 0.0, 0.8, 1.0];
276
277impl BatchKind {
278    fn sampler_tag(&self) -> GpuProfileTag {
279        match *self {
280            BatchKind::SplitComposite => GPU_TAG_PRIM_SPLIT_COMPOSITE,
281            BatchKind::Brush(kind) => {
282                match kind {
283                    BrushBatchKind::Solid => GPU_TAG_BRUSH_SOLID,
284                    BrushBatchKind::Image(..) => GPU_TAG_BRUSH_IMAGE,
285                    BrushBatchKind::Blend => GPU_TAG_BRUSH_BLEND,
286                    BrushBatchKind::MixBlend { .. } => GPU_TAG_BRUSH_MIXBLEND,
287                    BrushBatchKind::YuvImage(..) => GPU_TAG_BRUSH_YUV_IMAGE,
288                    BrushBatchKind::LinearGradient => GPU_TAG_BRUSH_LINEAR_GRADIENT,
289                    BrushBatchKind::Opacity => GPU_TAG_BRUSH_OPACITY,
290                }
291            }
292            BatchKind::TextRun(_) => GPU_TAG_PRIM_TEXT_RUN,
293        }
294    }
295}
296
297fn flag_changed(before: DebugFlags, after: DebugFlags, select: DebugFlags) -> Option<bool> {
298    if before & select != after & select {
299        Some(after.contains(select))
300    } else {
301        None
302    }
303}
304
/// Color modes used when drawing text runs and images.
///
/// The explicit discriminants and `#[repr(C)]` suggest these values form an
/// ABI with the shader side — NOTE(review): the shader constants are not
/// visible in this chunk; confirm before renumbering any variant.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum ShaderColorMode {
    FromRenderPassMode = 0,
    Alpha = 1,
    SubpixelConstantTextColor = 2,
    SubpixelWithBgColorPass0 = 3,
    SubpixelWithBgColorPass1 = 4,
    SubpixelWithBgColorPass2 = 5,
    SubpixelDualSource = 6,
    BitmapShadow = 7,
    ColorBitmap = 8,
    Image = 9,
    MultiplyDualSource = 10,
}
320
321impl From<GlyphFormat> for ShaderColorMode {
322    fn from(format: GlyphFormat) -> ShaderColorMode {
323        match format {
324            GlyphFormat::Alpha |
325            GlyphFormat::TransformedAlpha |
326            GlyphFormat::Bitmap => ShaderColorMode::Alpha,
327            GlyphFormat::Subpixel | GlyphFormat::TransformedSubpixel => {
328                panic!("Subpixel glyph formats must be handled separately.");
329            }
330            GlyphFormat::ColorBitmap => ShaderColorMode::ColorBitmap,
331        }
332    }
333}
334
/// Enumeration of the texture samplers used across the various WebRender shaders.
///
/// Each variant corresponds to a uniform declared in shader source. We only bind
/// the variants we need for a given shader, so not every variant is bound for every
/// batch.
///
/// The fixed texture slot each sampler occupies is defined by the
/// `TextureSlot` conversion impl in this module.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum TextureSampler {
    // Source color textures 0..=2; selected by index via `TextureSampler::color`.
    Color0,
    Color1,
    Color2,
    GpuCache,
    TransformPalette,
    RenderTasks,
    Dither,
    PrimitiveHeadersF,
    PrimitiveHeadersI,
    ClipMask,
}
353
354impl TextureSampler {
355    pub(crate) fn color(n: usize) -> TextureSampler {
356        match n {
357            0 => TextureSampler::Color0,
358            1 => TextureSampler::Color1,
359            2 => TextureSampler::Color2,
360            _ => {
361                panic!("There are only 3 color samplers.");
362            }
363        }
364    }
365}
366
367impl Into<TextureSlot> for TextureSampler {
368    fn into(self) -> TextureSlot {
369        match self {
370            TextureSampler::Color0 => TextureSlot(0),
371            TextureSampler::Color1 => TextureSlot(1),
372            TextureSampler::Color2 => TextureSlot(2),
373            TextureSampler::GpuCache => TextureSlot(3),
374            TextureSampler::TransformPalette => TextureSlot(4),
375            TextureSampler::RenderTasks => TextureSlot(5),
376            TextureSampler::Dither => TextureSlot(6),
377            TextureSampler::PrimitiveHeadersF => TextureSlot(7),
378            TextureSampler::PrimitiveHeadersI => TextureSlot(8),
379            TextureSampler::ClipMask => TextureSlot(9),
380        }
381    }
382}
383
/// The graphics API family backing the renderer. Only OpenGL is defined here.
#[derive(Clone, Debug, PartialEq)]
pub enum GraphicsApi {
    OpenGL,
}
388
/// Descriptive information about the graphics API/device in use.
#[derive(Clone, Debug)]
pub struct GraphicsApiInfo {
    /// Which API family is in use.
    pub kind: GraphicsApi,
    /// Renderer name string — presumably driver-reported; populated elsewhere.
    pub renderer: String,
    /// API version string.
    pub version: String,
}
395
/// GPU timing results for a single frame.
#[derive(Debug)]
pub struct GpuProfile {
    /// The frame these timings belong to.
    pub frame_id: GpuFrameId,
    /// Total GPU paint time, summed over all GPU timers recorded for the frame.
    pub paint_time_ns: u64,
}
401
402impl GpuProfile {
403    fn new(frame_id: GpuFrameId, timers: &[GpuTimer]) -> GpuProfile {
404        let mut paint_time_ns = 0;
405        for timer in timers {
406            paint_time_ns += timer.time_ns;
407        }
408        GpuProfile {
409            frame_id,
410            paint_time_ns,
411        }
412    }
413}
414
/// CPU timing results for a single frame.
#[derive(Debug)]
pub struct CpuProfile {
    /// The frame these timings belong to.
    pub frame_id: GpuFrameId,
    /// Time spent in the render backend, in nanoseconds.
    pub backend_time_ns: u64,
    /// Time spent compositing, in nanoseconds.
    pub composite_time_ns: u64,
    /// Number of draw calls issued for the frame.
    pub draw_calls: usize,
}
422
423impl CpuProfile {
424    fn new(
425        frame_id: GpuFrameId,
426        backend_time_ns: u64,
427        composite_time_ns: u64,
428        draw_calls: usize,
429    ) -> CpuProfile {
430        CpuProfile {
431            frame_id,
432            backend_time_ns,
433            composite_time_ns,
434            draw_calls,
435        }
436    }
437}
438
/// The selected partial present mode for a given frame.
// NOTE(review): only the single-rect fallback strategy is defined here.
#[derive(Debug, Copy, Clone)]
enum PartialPresentMode {
    /// The device supports fewer dirty rects than the number of dirty rects
    /// that WR produced. In this case, the WR dirty rects are union'ed into
    /// a single dirty rect, that is provided to the caller.
    Single {
        dirty_rect: DeviceRect,
    },
}
449
/// A texture-cache texture paired with the category it is accounted under
/// when reporting memory usage.
struct CacheTexture {
    texture: Texture,
    category: TextureCacheCategory,
}
454
/// Helper struct for resolving device Textures for use during rendering passes.
///
/// Manages the mapping between the at-a-distance texture handles used by the
/// `RenderBackend` (which does not directly interface with the GPU) and actual
/// device texture handles.
struct TextureResolver {
    /// A map to resolve texture cache IDs to native textures.
    texture_cache_map: FastHashMap<CacheTextureId, CacheTexture>,

    /// Map of external image IDs to native textures.
    external_images: FastHashMap<DeferredResolveIndex, ExternalTexture>,

    /// A special 1x1 dummy texture used for shaders that expect to work with
    /// the output of the previous pass but are actually running in the first
    /// pass. Uploaded as a single opaque-white RGBA8 texel on creation.
    dummy_cache_texture: Texture,
}
472
impl TextureResolver {
    /// Creates a resolver with empty maps and allocates the 1x1 dummy
    /// texture, uploading it as a single opaque-white RGBA8 texel.
    fn new(device: &mut Device) -> TextureResolver {
        let dummy_cache_texture = device
            .create_texture(
                ImageBufferKind::Texture2D,
                ImageFormat::RGBA8,
                1,
                1,
                TextureFilter::Linear,
                None,
            );
        device.upload_texture_immediate(
            &dummy_cache_texture,
            &[0xff, 0xff, 0xff, 0xff],
        );

        TextureResolver {
            texture_cache_map: FastHashMap::default(),
            external_images: FastHashMap::default(),
            dummy_cache_texture,
        }
    }

    /// Deletes every device texture owned by this resolver.
    fn deinit(self, device: &mut Device) {
        device.delete_texture(self.dummy_cache_texture);

        for (_id, item) in self.texture_cache_map {
            device.delete_texture(item.texture);
        }
    }

    /// Per-frame setup hook. Currently a no-op.
    fn begin_frame(&mut self) {
    }

    /// Called at the end of a render pass with the cache textures that are no
    /// longer needed by subsequent passes.
    fn end_pass(
        &mut self,
        device: &mut Device,
        textures_to_invalidate: &[CacheTextureId],
    ) {
        // For any texture that is no longer needed, immediately
        // invalidate it so that tiled GPUs don't need to resolve it
        // back to memory.
        for texture_id in textures_to_invalidate {
            let render_target = &self.texture_cache_map[texture_id].texture;
            device.invalidate_render_target(render_target);
        }
    }

    // Bind a source texture to the device, returning the swizzle to use when
    // sampling it (the default swizzle for everything except cache textures,
    // which carry their own).
    fn bind(&self, texture_id: &TextureSource, sampler: TextureSampler, device: &mut Device) -> Swizzle {
        match *texture_id {
            // Nothing is bound for an invalid source.
            TextureSource::Invalid => {
                Swizzle::default()
            }
            TextureSource::Dummy => {
                let swizzle = Swizzle::default();
                device.bind_texture(sampler, &self.dummy_cache_texture, swizzle);
                swizzle
            }
            TextureSource::External(ref index, _) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                device.bind_external_texture(sampler, texture);
                Swizzle::default()
            }
            TextureSource::TextureCache(index, swizzle) => {
                let texture = &self.texture_cache_map[&index].texture;
                device.bind_texture(sampler, texture, swizzle);
                swizzle
            }
        }
    }

    // Get the real (OpenGL) texture ID for a given source texture.
    // For a texture cache texture, the IDs are stored in a vector
    // map for fast access.
    fn resolve(&self, texture_id: &TextureSource) -> Option<(&Texture, Swizzle)> {
        match *texture_id {
            TextureSource::Invalid => None,
            TextureSource::Dummy => {
                Some((&self.dummy_cache_texture, Swizzle::default()))
            }
            TextureSource::External(..) => {
                panic!("BUG: External textures cannot be resolved, they can only be bound.");
            }
            TextureSource::TextureCache(index, swizzle) => {
                Some((&self.texture_cache_map[&index].texture, swizzle))
            }
        }
    }

    // Retrieve the deferred / resolved UV rect if an external texture, otherwise
    // return the default supplied UV rect.
    fn get_uv_rect(
        &self,
        source: &TextureSource,
        default_value: TexelRect,
    ) -> TexelRect {
        match source {
            TextureSource::External(ref index, _) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                texture.get_uv_rect()
            }
            _ => {
                default_value
            }
        }
    }

    /// Returns the size of the texture in pixels
    fn get_texture_size(&self, texture: &TextureSource) -> DeviceIntSize {
        match *texture {
            TextureSource::Invalid => DeviceIntSize::zero(),
            TextureSource::TextureCache(id, _) => {
                self.texture_cache_map[&id].texture.get_dimensions()
            },
            TextureSource::External(index, _) => {
                // External textures have no stored dimensions; derive the size
                // from the extents of their UV rect.
                let uv_rect = self.external_images[&index].get_uv_rect();
                (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32()
            },
            TextureSource::Dummy => DeviceIntSize::new(1, 1),
        }
    }

    /// Returns a memory report of GPU texture bytes, split by cache category.
    fn report_memory(&self) -> MemoryReport {
        let mut report = MemoryReport::default();

        // We're reporting GPU memory rather than heap-allocations, so we don't
        // use size_of_op.
        for item in self.texture_cache_map.values() {
            let counter = match item.category {
                TextureCacheCategory::Atlas => &mut report.atlas_textures,
                TextureCacheCategory::Standalone => &mut report.standalone_textures,
                TextureCacheCategory::PictureTile => &mut report.picture_tile_textures,
                TextureCacheCategory::RenderTarget => &mut report.render_target_textures,
            };
            *counter += item.texture.size_in_bytes();
        }

        report
    }

    /// Records an estimate of external-image memory use into the profile.
    fn update_profile(&self, profile: &mut TransactionProfile) {
        let mut external_image_bytes = 0;
        for img in self.external_images.values() {
            // Size is derived from the UV rect extents, as in get_texture_size.
            let uv_rect = img.get_uv_rect();
            let size = (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32();

            // Assume 4 bytes per pixels which is true most of the time but
            // not always.
            let bpp = 4;
            external_image_bytes += size.area() as usize * bpp;
        }

        profile.set(profiler::EXTERNAL_IMAGE_BYTES, profiler::bytes_to_mb(external_image_bytes));
    }

    /// Mutable access to a cache texture. Panics if `id` was never allocated.
    fn get_cache_texture_mut(&mut self, id: &CacheTextureId) -> &mut Texture {
        &mut self.texture_cache_map
            .get_mut(id)
            .expect("bug: texture not allocated")
            .texture
    }
}
640
/// The blending configurations the renderer can apply when drawing a batch.
///
/// CSS mix-blend-modes are lowered onto these via `from_mix_blend_mode`,
/// depending on the device's blending capabilities.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BlendMode {
    None,
    Alpha,
    PremultipliedAlpha,
    PremultipliedDestOut,
    SubpixelDualSource,
    SubpixelConstantTextColor(ColorF),
    SubpixelWithBgColor,
    /// Hardware advanced-blend equation for the given mix-blend-mode.
    Advanced(MixBlendMode),
    MultiplyDualSource,
    Screen,
    Exclusion,
}
657
658impl BlendMode {
659    /// Decides when a given mix-blend-mode can be implemented in terms of
660    /// simple blending, dual-source blending, advanced blending, or not at
661    /// all based on available capabilities.
662    pub fn from_mix_blend_mode(
663        mode: MixBlendMode,
664        advanced_blend: bool,
665        coherent: bool,
666        dual_source: bool,
667    ) -> Option<BlendMode> {
668        // If we emulate a mix-blend-mode via simple or dual-source blending,
669        // care must be taken to output alpha As + Ad*(1-As) regardless of what
670        // the RGB output is to comply with the mix-blend-mode spec.
671        Some(match mode {
672            // If we have coherent advanced blend, just use that.
673            _ if advanced_blend && coherent => BlendMode::Advanced(mode),
674            // Screen can be implemented as Cs + Cd - Cs*Cd => Cs + Cd*(1-Cs)
675            MixBlendMode::Screen => BlendMode::Screen,
676            // Exclusion can be implemented as Cs + Cd - 2*Cs*Cd => Cs*(1-Cd) + Cd*(1-Cs)
677            MixBlendMode::Exclusion => BlendMode::Exclusion,
678            // Multiply can be implemented as Cs*Cd + Cs*(1-Ad) + Cd*(1-As) => Cs*(1-Ad) + Cd*(1 - SRC1=(As-Cs))
679            MixBlendMode::Multiply if dual_source => BlendMode::MultiplyDualSource,
680            // Otherwise, use advanced blend without coherency if available.
681            _ if advanced_blend => BlendMode::Advanced(mode),
682            // If advanced blend is not available, then we have to use brush_mix_blend.
683            _ => return None,
684        })
685    }
686}
687
/// Information about the state of the debugging / profiler overlay in native compositing mode.
struct DebugOverlayState {
    /// True if any of the current debug flags will result in drawing a debug overlay.
    is_enabled: bool,

    /// The current size of the debug overlay surface. None implies that the
    /// debug surface isn't currently allocated (it is created lazily).
    current_size: Option<DeviceIntSize>,
}
697
698impl DebugOverlayState {
699    fn new() -> Self {
700        DebugOverlayState {
701            is_enabled: false,
702            current_size: None,
703        }
704    }
705}
706
/// Tracks buffer damage rects over a series of frames.
#[derive(Debug, Default)]
struct BufferDamageTracker {
    // Fixed-size ring buffer of the most recent per-frame damage rects.
    damage_rects: [DeviceRect; 2],
    // Index of the slot the next damage rect will be written to; stepped
    // backwards (with wrap-around) after each push.
    current_offset: usize,
}
713
714impl BufferDamageTracker {
715    /// Sets the damage rect for the current frame. Should only be called *after*
716    /// get_damage_rect() has been called to get the current backbuffer's damage rect.
717    fn push_dirty_rect(&mut self, rect: &DeviceRect) {
718        self.damage_rects[self.current_offset] = rect.clone();
719        self.current_offset = match self.current_offset {
720            0 => self.damage_rects.len() - 1,
721            n => n - 1,
722        }
723    }
724
725    /// Gets the damage rect for the current backbuffer, given the backbuffer's age.
726    /// (The number of frames since it was previously the backbuffer.)
727    /// Returns an empty rect if the buffer is valid, and None if the entire buffer is invalid.
728    fn get_damage_rect(&self, buffer_age: usize) -> Option<DeviceRect> {
729        match buffer_age {
730            // 0 means this is a new buffer, so is completely invalid.
731            0 => None,
732            // 1 means this backbuffer was also the previous frame's backbuffer
733            // (so must have been copied to the frontbuffer). It is therefore entirely valid.
734            1 => Some(DeviceRect::zero()),
735            // We must calculate the union of the damage rects since this buffer was previously
736            // the backbuffer.
737            n if n <= self.damage_rects.len() + 1 => {
738                Some(
739                    self.damage_rects.iter()
740                        .cycle()
741                        .skip(self.current_offset + 1)
742                        .take(n - 1)
743                        .fold(DeviceRect::zero(), |acc, r| acc.union(r))
744                )
745            }
746            // The backbuffer is older than the number of frames for which we track,
747            // so we treat it as entirely invalid.
748            _ => None,
749        }
750    }
751}
752
/// The renderer is responsible for submitting to the GPU the work prepared by the
/// RenderBackend.
///
/// We have a separate `Renderer` instance for each instance of WebRender (generally
/// one per OS window), and all instances share the same thread.
pub struct Renderer {
    /// Receives `ResultMsg`s (published documents, resource/GPU-cache updates,
    /// etc.) from the render backend thread; drained in `update()`.
    result_rx: Receiver<ResultMsg>,
    /// The GL device abstraction through which all GPU work is issued.
    pub device: Device,
    /// Texture cache update lists accumulated from the backend since the last render.
    pending_texture_updates: Vec<TextureUpdateList>,
    /// True if there are any TextureCacheUpdate pending.
    pending_texture_cache_updates: bool,
    /// Native compositor surface operations accumulated since the last render.
    pending_native_surface_updates: Vec<NativeSurfaceOperation>,
    /// GPU cache update lists accumulated since the last render.
    pending_gpu_cache_updates: Vec<GpuCacheUpdateList>,
    /// Set when the backend has requested a full GPU cache clear.
    pending_gpu_cache_clear: bool,
    /// Paths of shader sources that changed and are awaiting a reload.
    pending_shader_updates: Vec<PathBuf>,
    /// Documents currently being rendered, keyed by document id.
    active_documents: FastHashMap<DocumentId, RenderedDocument>,

    /// Compiled shader programs, possibly shared with other renderer instances.
    shaders: Rc<RefCell<Shaders>>,

    /// Maximum number of CPU/GPU profiles retained for `get_frame_profiles()`.
    max_recorded_profiles: usize,

    /// Color used when clearing the framebuffer.
    clear_color: ColorF,
    enable_clear_scissor: bool,
    /// True when advanced blend equations are supported but not coherent, so
    /// explicit blend barriers must be issued between overlapping primitives.
    enable_advanced_blend_barriers: bool,
    /// Workarounds: clear cache / alpha targets by drawing quads instead of
    /// using glClear on devices where that path is preferable or required.
    clear_caches_with_quads: bool,
    clear_alpha_targets_with_quads: bool,

    /// Debug overlay renderer, created lazily on first use.
    debug: debug::LazyInitializedDebugRenderer,
    debug_flags: DebugFlags,
    /// Profile counters for the transaction currently being rendered.
    profile: TransactionProfile,
    frame_counter: u64,
    /// Accumulated time spent uploading resource (texture cache) data this frame.
    resource_upload_time: f64,
    /// Accumulated time spent uploading GPU cache data this frame.
    gpu_cache_upload_time: f64,
    profiler: Profiler,

    /// Timestamp of the previous frame, used for frame timing.
    last_time: u64,

    pub gpu_profiler: GpuProfiler,
    /// Vertex array objects used for instanced drawing.
    vaos: vertex::RendererVAOs,

    /// Texture backing the GPU cache (updated via PBO or scatter path).
    gpu_cache_texture: gpu_cache::GpuCacheTexture,
    /// Per-frame vertex data textures, cycled round-robin via
    /// `current_vertex_data_textures`.
    vertex_data_textures: Vec<vertex::VertexDataTextures>,
    current_vertex_data_textures: usize,

    /// When the GPU cache debugger is enabled, we keep track of the live blocks
    /// in the GPU cache so that we can use them for the debug display. This
    /// member stores those live blocks, indexed by row.
    gpu_cache_debug_chunks: Vec<Vec<GpuCacheDebugChunk>>,

    /// Frame id of the most recent GPU cache update that was applied.
    gpu_cache_frame_id: FrameId,
    /// Set if the GPU cache overflowed its backing texture.
    gpu_cache_overflow: bool,

    /// Epoch information published by the backend, per (pipeline, document).
    pipeline_info: PipelineInfo,

    /// Manages and resolves source textures IDs to real texture IDs.
    texture_resolver: TextureResolver,

    /// Pool of PBOs used for texture uploads.
    texture_upload_pbo_pool: UploadPBOPool,
    /// Pool of staging textures used for texture uploads.
    staging_texture_pool: UploadTexturePool,

    /// 8x8 dither matrix as an R8 texture; present only when dithering is
    /// enabled in the options.
    dither_matrix_texture: Option<Texture>,

    /// Optional trait object that allows the client
    /// application to provide external buffers for image data.
    external_image_handler: Option<Box<dyn ExternalImageHandler>>,

    /// Optional function pointers for measuring memory used by a given
    /// heap-allocated pointer.
    size_of_ops: Option<MallocSizeOfOps>,

    /// Errors recorded during rendering, for the embedder to inspect.
    pub renderer_errors: Vec<RendererError>,

    /// Async screenshot machinery for frame recording and one-off captures.
    pub(in crate) async_frame_recorder: Option<AsyncScreenshotGrabber>,
    pub(in crate) async_screenshots: Option<AsyncScreenshotGrabber>,

    /// List of profile results from previous frames. Can be retrieved
    /// via get_frame_profiles().
    cpu_profiles: VecDeque<CpuProfile>,
    gpu_profiles: VecDeque<GpuProfile>,

    /// Notification requests to be fulfilled after rendering.
    notifications: Vec<NotificationRequest>,

    /// Framebuffer size of the last rendered frame, if any.
    device_size: Option<DeviceIntSize>,

    /// A lazily created texture for the zoom debugging widget.
    zoom_debug_texture: Option<Texture>,

    /// The current mouse position. This is used for debugging
    /// functionality only, such as the debug zoom widget.
    cursor_position: DeviceIntPoint,

    /// Guards to check if we might be rendering a frame with expired texture
    /// cache entries.
    shared_texture_cache_cleared: bool,

    /// The set of documents which we've seen a publish for since last render.
    documents_seen: FastHashSet<DocumentId>,

    /// FBO used for framebuffer readback when capturing.
    #[cfg(feature = "capture")]
    read_fbo: FBOId,
    /// External textures owned by the renderer during replay.
    #[cfg(feature = "replay")]
    owned_external_images: FastHashMap<(ExternalImageId, u8), ExternalTexture>,

    /// The compositing config, affecting how WR composites into the final scene.
    compositor_config: CompositorConfig,

    /// The compositor kind currently in effect.
    current_compositor_kind: CompositorKind,

    /// Maintains a set of allocated native composite surfaces. This allows any
    /// currently allocated surfaces to be cleaned up as soon as deinit() is
    /// called (the normal bookkeeping for native surfaces exists in the
    /// render backend thread).
    allocated_native_surfaces: FastHashSet<NativeSurfaceId>,

    /// If true, partial present state has been reset and everything needs to
    /// be drawn on the next render.
    force_redraw: bool,

    /// State related to the debug / profiling overlays
    debug_overlay_state: DebugOverlayState,

    /// Tracks the dirty rectangles from previous frames. Used on platforms
    /// that require keeping the front buffer fully correct when doing
    /// partial present (e.g. unix desktop with EGL_EXT_buffer_age).
    buffer_damage_tracker: BufferDamageTracker,

    /// Maximum number of primitive instances per draw call, derived from
    /// `RendererOptions::MAX_INSTANCE_BUFFER_SIZE`.
    max_primitive_instance_count: usize,
    /// Whether hardware instancing is used for instanced draws (see
    /// `RendererOptions::enable_instancing`).
    enable_instancing: bool,
}
883
/// Errors that can occur while creating or running a `Renderer`.
#[derive(Debug)]
pub enum RendererError {
    /// A shader failed to compile or link.
    Shader(ShaderError),
    /// Spawning one of the worker threads failed.
    Thread(std::io::Error),
    /// The resource cache reported an error during initialization.
    Resource(ResourceCacheError),
    /// The device's maximum texture size is below the minimum WebRender
    /// can work with (see `MIN_TEXTURE_SIZE` in `Renderer::new`).
    MaxTextureSize,
    /// A software GL rasterizer was detected while
    /// `RendererOptions::reject_software_rasterizer` was set.
    SoftwareRasterizer,
}
892
893impl From<ShaderError> for RendererError {
894    fn from(err: ShaderError) -> Self {
895        RendererError::Shader(err)
896    }
897}
898
899impl From<std::io::Error> for RendererError {
900    fn from(err: std::io::Error) -> Self {
901        RendererError::Thread(err)
902    }
903}
904
905impl From<ResourceCacheError> for RendererError {
906    fn from(err: ResourceCacheError) -> Self {
907        RendererError::Resource(err)
908    }
909}
910
911impl Renderer {
    /// Initializes WebRender and creates a `Renderer` and `RenderApiSender`.
    ///
    /// Creates the GL device and shader set, spawns the scene builder and
    /// render backend threads, and wires up the channels connecting the
    /// client API, the backend and this renderer.
    ///
    /// # Examples
    /// Initializes a `Renderer` with some reasonable values. For more information see
    /// [`RendererOptions`][rendereroptions].
    ///
    /// ```rust,ignore
    /// # use webrender::renderer::Renderer;
    /// # use std::path::PathBuf;
    /// let opts = webrender::RendererOptions {
    ///    device_pixel_ratio: 1.0,
    ///    resource_override_path: None,
    ///    enable_aa: false,
    /// };
    /// let (renderer, sender) = Renderer::new(opts);
    /// ```
    /// [rendereroptions]: struct.RendererOptions.html
    ///
    /// # Errors
    /// Returns an error if shader compilation fails, a thread cannot be
    /// spawned, the device's max texture size is too small, or a software
    /// rasterizer is detected while `reject_software_rasterizer` is set.
    pub fn new(
        gl: Rc<gl_context_loader::GenericGlContext>,
        notifier: Box<dyn RenderNotifier>,
        mut options: RendererOptions,
        shaders: Option<&SharedShaders>,
    ) -> Result<(Self, RenderApiSender), RendererError> {
        // One-time, process-wide initialization (profiler hookup).
        if !wr_has_been_initialized() {
            // If the profiler feature is enabled, try to load the profiler shared library
            // if the path was provided.
            #[cfg(feature = "profiler")]
            unsafe {
                if let Ok(ref tracy_path) = std::env::var("WR_TRACY_PATH") {
                    let ok = tracy_rs::load(tracy_path);
                    println!("Load tracy from {} -> {}", tracy_path, ok);
                }
            }

            register_thread_with_profiler("Compositor".to_owned());
        }

        HAS_BEEN_INITIALIZED.store(true, Ordering::SeqCst);

        // Channels: api (client -> backend) and result (backend -> renderer).
        let (api_tx, api_rx) = unbounded_channel();
        let (result_tx, result_rx) = unbounded_channel();
        let gl_type = gl.get_type();

        // Wrap the supplied GL context in the device abstraction.
        let mut device = Device::new(
            gl,
            options.crash_annotator.clone(),
            options.resource_override_path.clone(),
            options.use_optimized_shaders,
            options.upload_method.clone(),
            options.cached_programs.take(),
            options.allow_texture_storage_support,
            options.allow_texture_swizzling,
            options.dump_shader_source.take(),
            options.surface_origin_is_top_left,
            options.panic_on_gl_error,
        );

        // Query device capabilities; features are only used when both the
        // device supports them and the options allow them.
        let color_cache_formats = device.preferred_color_formats();
        let swizzle_settings = device.swizzle_settings();
        let use_dual_source_blending =
            device.get_capabilities().supports_dual_source_blending &&
            options.allow_dual_source_blending;
        let ext_blend_equation_advanced =
            options.allow_advanced_blend_equation &&
            device.get_capabilities().supports_advanced_blend_equation;
        let ext_blend_equation_advanced_coherent =
            device.supports_extension("GL_KHR_blend_equation_advanced_coherent");

        // 2048 is the minimum that the texture cache can work with.
        const MIN_TEXTURE_SIZE: i32 = 2048;
        let mut max_internal_texture_size = device.max_texture_size();
        if max_internal_texture_size < MIN_TEXTURE_SIZE {
            // Broken GL contexts can return a max texture size of zero (See #1260).
            // Better to gracefully fail now than panic as soon as a texture is allocated.
            error!(
                "Device reporting insufficient max texture size ({})",
                max_internal_texture_size
            );
            return Err(RendererError::MaxTextureSize);
        }
        if let Some(internal_limit) = options.max_internal_texture_size {
            assert!(internal_limit >= MIN_TEXTURE_SIZE);
            max_internal_texture_size = max_internal_texture_size.min(internal_limit);
        }

        // Optionally refuse to run on software GL implementations, detected
        // by well-known renderer-name substrings.
        if options.reject_software_rasterizer {
          let renderer_name_lc = device.get_capabilities().renderer_name.to_lowercase();
          if renderer_name_lc.contains("llvmpipe") || renderer_name_lc.contains("softpipe") || renderer_name_lc.contains("software rasterizer") {
            return Err(RendererError::SoftwareRasterizer);
          }
        }

        let image_tiling_threshold = options.image_tiling_threshold
            .min(max_internal_texture_size);

        // Bracket the GL resource creation below (shaders, textures, VAOs)
        // in a device frame; closed by device.end_frame() further down.
        device.begin_frame();

        // Reuse a shared shader set if the embedder supplied one, otherwise
        // compile our own.
        let shaders = match shaders {
            Some(shaders) => Rc::clone(shaders),
            None => Rc::new(RefCell::new(Shaders::new(&mut device, gl_type, &options)?)),
        };

        // Optional 8x8 dither matrix, uploaded once as an R8 texture.
        let dither_matrix_texture = if options.enable_dithering {
            let dither_matrix: [u8; 64] = [
                0,
                48,
                12,
                60,
                3,
                51,
                15,
                63,
                32,
                16,
                44,
                28,
                35,
                19,
                47,
                31,
                8,
                56,
                4,
                52,
                11,
                59,
                7,
                55,
                40,
                24,
                36,
                20,
                43,
                27,
                39,
                23,
                2,
                50,
                14,
                62,
                1,
                49,
                13,
                61,
                34,
                18,
                46,
                30,
                33,
                17,
                45,
                29,
                10,
                58,
                6,
                54,
                9,
                57,
                5,
                53,
                42,
                26,
                38,
                22,
                41,
                25,
                37,
                21,
            ];

            let texture = device.create_texture(
                ImageBufferKind::Texture2D,
                ImageFormat::R8,
                8,
                8,
                TextureFilter::Nearest,
                None,
            );
            device.upload_texture_immediate(&texture, &dither_matrix);

            Some(texture)
        } else {
            None
        };

        // When instancing is disabled, the VAOs need a per-draw instance
        // limit so instance data can be duplicated into the vertex stream.
        let max_primitive_instance_count =
            RendererOptions::MAX_INSTANCE_BUFFER_SIZE / mem::size_of::<PrimitiveInstanceData>();
        let vaos = vertex::RendererVAOs::new(
            &mut device,
            if options.enable_instancing { None } else { NonZeroUsize::new(max_primitive_instance_count) },
        );

        let texture_upload_pbo_pool = UploadPBOPool::new(&mut device, options.upload_pbo_default_size);
        let staging_texture_pool = UploadTexturePool::new();
        let texture_resolver = TextureResolver::new(&mut device);

        let mut vertex_data_textures = Vec::new();
        for _ in 0 .. VERTEX_DATA_TEXTURE_COUNT {
            vertex_data_textures.push(vertex::VertexDataTextures::new());
        }

        // On some (mostly older, integrated) GPUs, the normal GPU texture cache update path
        // doesn't work well when running on ANGLE, causing CPU stalls inside D3D and/or the
        // GPU driver. See https://bugzilla.mozilla.org/show_bug.cgi?id=1576637 for much
        // more detail. To reduce the number of code paths we have active that require testing,
        // we will enable the GPU cache scatter update path on all devices running with ANGLE.
        // We want a better solution long-term, but for now this is a significant performance
        // improvement on HD4600 era GPUs, and shouldn't hurt performance in a noticeable
        // way on other systems running under ANGLE.
        let is_software = device.get_capabilities().renderer_name.starts_with("Software");

        // On other GL platforms, like macOS or Android, creating many PBOs is very inefficient.
        // This is what happens in GPU cache updates in PBO path. Instead, we switch everything
        // except software GL to use the GPU scattered updates.
        let supports_scatter = device.get_capabilities().supports_color_buffer_float;
        let gpu_cache_texture = gpu_cache::GpuCacheTexture::new(
            &mut device,
            supports_scatter && !is_software,
        )?;

        device.end_frame();

        let backend_notifier = notifier.clone();

        let clear_alpha_targets_with_quads = !device.get_capabilities().supports_alpha_target_clears;

        // Pick the default font render mode from the AA options; subpixel AA
        // additionally requires dual-source blending (unless forced).
        let prefer_subpixel_aa = options.force_subpixel_aa || (options.enable_subpixel_aa && use_dual_source_blending);
        let default_font_render_mode = match (options.enable_aa, prefer_subpixel_aa) {
            (true, true) => FontRenderMode::Subpixel,
            (true, false) => FontRenderMode::Alpha,
            (false, _) => FontRenderMode::Mono,
        };

        // Derive the compositor kind from the compositor config.
        let compositor_kind = match options.compositor_config {
            CompositorConfig::Draw { max_partial_present_rects, draw_previous_partial_present_regions, .. } => {
                CompositorKind::Draw { max_partial_present_rects, draw_previous_partial_present_regions }
            }
            CompositorConfig::Native { ref compositor } => {
                let capabilities = compositor.get_capabilities();

                CompositorKind::Native {
                    capabilities,
                }
            }
        };

        // Configuration handed to the frame builder on the backend thread.
        let config = FrameBuilderConfig {
            default_font_render_mode,
            dual_source_blending_is_enabled: true,
            dual_source_blending_is_supported: use_dual_source_blending,
            chase_primitive: options.chase_primitive,
            testing: options.testing,
            gpu_supports_fast_clears: options.gpu_supports_fast_clears,
            gpu_supports_advanced_blend: ext_blend_equation_advanced,
            advanced_blend_is_coherent: ext_blend_equation_advanced_coherent,
            gpu_supports_render_target_partial_update: device.get_capabilities().supports_render_target_partial_update,
            external_images_require_copy: !device.get_capabilities().supports_image_external_essl3,
            batch_lookback_count: RendererOptions::BATCH_LOOKBACK_COUNT,
            background_color: Some(options.clear_color),
            compositor_kind,
            tile_size_override: None,
            max_depth_ids: device.max_depth_ids(),
            max_target_size: max_internal_texture_size,
            force_invalidation: false,
            is_software,
            low_quality_pinch_zoom: options.low_quality_pinch_zoom,
        };
        info!("WR {:?}", config);

        let debug_flags = options.debug_flags;
        let size_of_op = options.size_of_op;
        let enclosing_size_of_op = options.enclosing_size_of_op;
        let make_size_of_ops =
            move || size_of_op.map(|o| MallocSizeOfOps::new(o, enclosing_size_of_op));
        // Worker thread pool: use the embedder-supplied pool if any,
        // otherwise build a default one with profiler registration hooks.
        let workers = options
            .workers
            .take()
            .unwrap_or_else(|| {
                let worker = ThreadPoolBuilder::new()
                    .thread_name(|idx|{ format!("WRWorker#{}", idx) })
                    .start_handler(move |idx| {
                        register_thread_with_profiler(format!("WRWorker#{}", idx));
                        profiler::register_thread(&format!("WRWorker#{}", idx));
                    })
                    .exit_handler(move |_idx| {
                        profiler::unregister_thread();
                    })
                    .build();
                Arc::new(worker.unwrap())
            });
        let sampler = options.sampler;
        let namespace_alloc_by_client = options.namespace_alloc_by_client;

        let font_instances = SharedFontInstanceMap::new();

        let blob_image_handler = options.blob_image_handler.take();
        let scene_builder_hooks = options.scene_builder_hooks;
        let rb_thread_name = format!("WRRenderBackend#{}", options.renderer_id.unwrap_or(0));
        let scene_thread_name = format!("WRSceneBuilder#{}", options.renderer_id.unwrap_or(0));
        let lp_scene_thread_name = format!("WRSceneBuilderLP#{}", options.renderer_id.unwrap_or(0));
        let glyph_rasterizer = GlyphRasterizer::new(workers, device.get_capabilities().supports_r8_texture_upload)?;

        let (scene_builder_channels, scene_tx) =
            SceneBuilderThreadChannels::new(api_tx.clone());

        let sb_font_instances = font_instances.clone();

        // Spawn the scene builder thread, which turns display lists into
        // built scenes for the render backend.
        thread::Builder::new().name(scene_thread_name.clone()).spawn(move || {
            register_thread_with_profiler(scene_thread_name.clone());
            profiler::register_thread(&scene_thread_name);

            let mut scene_builder = SceneBuilderThread::new(
                config,
                sb_font_instances,
                make_size_of_ops(),
                scene_builder_hooks,
                scene_builder_channels,
            );
            scene_builder.run();

            profiler::unregister_thread();
        })?;

        // Optionally spawn a low-priority scene builder thread that forwards
        // to the main one; otherwise low-priority work shares its channel.
        let low_priority_scene_tx = if options.support_low_priority_transactions {
            let (low_priority_scene_tx, low_priority_scene_rx) = unbounded_channel();
            let lp_builder = LowPrioritySceneBuilderThread {
                rx: low_priority_scene_rx,
                tx: scene_tx.clone(),
            };

            thread::Builder::new().name(lp_scene_thread_name.clone()).spawn(move || {
                register_thread_with_profiler(lp_scene_thread_name.clone());
                profiler::register_thread(&lp_scene_thread_name);

                let mut scene_builder = lp_builder;
                scene_builder.run();

                profiler::unregister_thread();
            })?;

            low_priority_scene_tx
        } else {
            scene_tx.clone()
        };

        let backend_blob_handler = blob_image_handler
            .as_ref()
            .map(|handler| handler.create_similar());

        let texture_cache_config = options.texture_cache_config.clone();
        let mut picture_tile_size = options.picture_tile_size.unwrap_or(picture::TILE_SIZE_DEFAULT);
        // Clamp the picture tile size to reasonable values.
        picture_tile_size.width = picture_tile_size.width.max(128).min(4096);
        picture_tile_size.height = picture_tile_size.height.max(128).min(4096);

        let picture_texture_filter = if options.low_quality_pinch_zoom {
            TextureFilter::Linear
        } else {
            TextureFilter::Nearest
        };

        let rb_scene_tx = scene_tx.clone();
        let rb_font_instances = font_instances.clone();
        let enable_multithreading = options.enable_multithreading;
        // Spawn the render backend thread, which owns the resource cache and
        // produces the frames this renderer draws.
        thread::Builder::new().name(rb_thread_name.clone()).spawn(move || {
            register_thread_with_profiler(rb_thread_name.clone());
            profiler::register_thread(&rb_thread_name);

            let texture_cache = TextureCache::new(
                max_internal_texture_size,
                image_tiling_threshold,
                picture_tile_size,
                color_cache_formats,
                swizzle_settings,
                &texture_cache_config,
                picture_texture_filter,
            );

            let glyph_cache = GlyphCache::new();

            let mut resource_cache = ResourceCache::new(
                texture_cache,
                glyph_rasterizer,
                glyph_cache,
                rb_font_instances,
            );

            resource_cache.enable_multithreading(enable_multithreading);

            let mut backend = RenderBackend::new(
                api_rx,
                result_tx,
                rb_scene_tx,
                resource_cache,
                backend_notifier,
                backend_blob_handler,
                config,
                sampler,
                make_size_of_ops(),
                debug_flags,
                namespace_alloc_by_client,
            );
            backend.run();
            profiler::unregister_thread();
        })?;

        // Pick the best available GPU marker mechanism for profiling.
        let debug_method = if !options.enable_gpu_markers {
            // The GPU markers are disabled.
            GpuDebugMethod::None
        } else if device.supports_extension("GL_KHR_debug") {
            GpuDebugMethod::KHR
        } else if device.supports_extension("GL_EXT_debug_marker") {
            GpuDebugMethod::MarkerEXT
        } else {
            println!("Warning: asking to enable_gpu_markers but no supporting extension was found");
            GpuDebugMethod::None
        };

        info!("using {:?}", debug_method);

        let gpu_profiler = GpuProfiler::new(Rc::clone(device.rc_gl()), debug_method);
        #[cfg(feature = "capture")]
        let read_fbo = device.create_fbo();

        // Assemble the renderer itself from everything built above.
        let mut renderer = Renderer {
            result_rx,
            device,
            active_documents: FastHashMap::default(),
            pending_texture_updates: Vec::new(),
            pending_texture_cache_updates: false,
            pending_native_surface_updates: Vec::new(),
            pending_gpu_cache_updates: Vec::new(),
            pending_gpu_cache_clear: false,
            pending_shader_updates: Vec::new(),
            shaders,
            debug: debug::LazyInitializedDebugRenderer::new(),
            debug_flags: DebugFlags::empty(),
            profile: TransactionProfile::new(),
            frame_counter: 0,
            resource_upload_time: 0.0,
            gpu_cache_upload_time: 0.0,
            profiler: Profiler::new(),
            max_recorded_profiles: options.max_recorded_profiles,
            clear_color: options.clear_color,
            enable_clear_scissor: options.enable_clear_scissor,
            enable_advanced_blend_barriers: !ext_blend_equation_advanced_coherent,
            clear_caches_with_quads: options.clear_caches_with_quads,
            clear_alpha_targets_with_quads,
            last_time: 0,
            gpu_profiler,
            vaos,
            vertex_data_textures,
            current_vertex_data_textures: 0,
            pipeline_info: PipelineInfo::default(),
            dither_matrix_texture,
            external_image_handler: None,
            size_of_ops: make_size_of_ops(),
            cpu_profiles: VecDeque::new(),
            gpu_profiles: VecDeque::new(),
            gpu_cache_texture,
            gpu_cache_debug_chunks: Vec::new(),
            gpu_cache_frame_id: FrameId::INVALID,
            gpu_cache_overflow: false,
            texture_upload_pbo_pool,
            staging_texture_pool,
            texture_resolver,
            renderer_errors: Vec::new(),
            async_frame_recorder: None,
            async_screenshots: None,
            #[cfg(feature = "capture")]
            read_fbo,
            #[cfg(feature = "replay")]
            owned_external_images: FastHashMap::default(),
            notifications: Vec::new(),
            device_size: None,
            zoom_debug_texture: None,
            cursor_position: DeviceIntPoint::zero(),
            shared_texture_cache_cleared: false,
            documents_seen: FastHashSet::default(),
            force_redraw: true,
            compositor_config: options.compositor_config,
            current_compositor_kind: compositor_kind,
            allocated_native_surfaces: FastHashSet::default(),
            debug_overlay_state: DebugOverlayState::new(),
            buffer_damage_tracker: BufferDamageTracker::default(),
            max_primitive_instance_count,
            enable_instancing: options.enable_instancing,
        };

        // We initially set the flags to default and then now call set_debug_flags
        // to ensure any potential transition when enabling a flag is run.
        renderer.set_debug_flags(debug_flags);

        let sender = RenderApiSender::new(
            api_tx,
            scene_tx,
            low_priority_scene_tx,
            blob_image_handler,
            font_instances,
        );
        Ok((renderer, sender))
    }
1414
1415    pub fn device_size(&self) -> Option<DeviceIntSize> {
1416        self.device_size
1417    }
1418
1419    /// Update the current position of the debug cursor.
1420    pub fn set_cursor_position(
1421        &mut self,
1422        position: DeviceIntPoint,
1423    ) {
1424        self.cursor_position = position;
1425    }
1426
1427    pub fn get_max_texture_size(&self) -> i32 {
1428        self.device.max_texture_size()
1429    }
1430
1431    pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
1432        GraphicsApiInfo {
1433            kind: GraphicsApi::OpenGL,
1434            version: self.device.gl().get_string(gl::VERSION),
1435            renderer: self.device.gl().get_string(gl::RENDERER),
1436        }
1437    }
1438
1439    pub fn preferred_color_format(&self) -> ImageFormat {
1440        self.device.preferred_color_formats().external
1441    }
1442
1443    pub fn required_texture_stride_alignment(&self, format: ImageFormat) -> usize {
1444        self.device.required_pbo_stride().num_bytes(format).get()
1445    }
1446
1447    pub fn set_clear_color(&mut self, color: ColorF) {
1448        self.clear_color = color;
1449    }
1450
1451    pub fn flush_pipeline_info(&mut self) -> PipelineInfo {
1452        mem::replace(&mut self.pipeline_info, PipelineInfo::default())
1453    }
1454
1455    /// Returns the Epoch of the current frame in a pipeline.
1456    pub fn current_epoch(&self, document_id: DocumentId, pipeline_id: PipelineId) -> Option<Epoch> {
1457        self.pipeline_info.epochs.get(&(pipeline_id, document_id)).cloned()
1458    }
1459
1460    /// Processes the result queue.
1461    ///
1462    /// Should be called before `render()`, as texture cache updates are done here.
1463    pub fn update(&mut self) {
1464        profile_scope!("update");
1465
1466        // Pull any pending results and return the most recent.
1467        while let Ok(msg) = self.result_rx.try_recv() {
1468            match msg {
1469                ResultMsg::PublishPipelineInfo(mut pipeline_info) => {
1470                    for ((pipeline_id, document_id), epoch) in pipeline_info.epochs {
1471                        self.pipeline_info.epochs.insert((pipeline_id, document_id), epoch);
1472                    }
1473                    self.pipeline_info.removed_pipelines.extend(pipeline_info.removed_pipelines.drain(..));
1474                }
1475                ResultMsg::PublishDocument(
1476                    document_id,
1477                    mut doc,
1478                    resource_update_list,
1479                ) => {
1480                    // Add a new document to the active set
1481
1482                    // If the document we are replacing must be drawn (in order to
1483                    // update the texture cache), issue a render just to
1484                    // off-screen targets, ie pass None to render_impl. We do this
1485                    // because a) we don't need to render to the main framebuffer
1486                    // so it is cheaper not to, and b) doing so without a
1487                    // subsequent present would break partial present.
1488                    if let Some(mut prev_doc) = self.active_documents.remove(&document_id) {
1489                        doc.profile.merge(&mut prev_doc.profile);
1490
1491                        if prev_doc.frame.must_be_drawn() {
1492                            self.render_impl(
1493                                document_id,
1494                                &mut prev_doc,
1495                                None,
1496                                0,
1497                            ).ok();
1498                        }
1499                    }
1500
1501                    self.active_documents.insert(document_id, doc);
1502
1503                    // IMPORTANT: The pending texture cache updates must be applied
1504                    //            *after* the previous frame has been rendered above
1505                    //            (if neceessary for a texture cache update). For
1506                    //            an example of why this is required:
1507                    //            1) Previous frame contains a render task that
1508                    //               targets Texture X.
1509                    //            2) New frame contains a texture cache update which
1510                    //               frees Texture X.
1511                    //            3) bad stuff happens.
1512
1513                    //TODO: associate `document_id` with target window
1514                    self.pending_texture_cache_updates |= !resource_update_list.texture_updates.updates.is_empty();
1515                    self.pending_texture_updates.push(resource_update_list.texture_updates);
1516                    self.pending_native_surface_updates.extend(resource_update_list.native_surface_updates);
1517                    self.documents_seen.insert(document_id);
1518                }
1519                ResultMsg::UpdateGpuCache(mut list) => {
1520                    if list.clear {
1521                        self.pending_gpu_cache_clear = true;
1522                    }
1523                    if list.clear {
1524                        self.gpu_cache_debug_chunks = Vec::new();
1525                    }
1526                    for cmd in mem::replace(&mut list.debug_commands, Vec::new()) {
1527                        match cmd {
1528                            GpuCacheDebugCmd::Alloc(chunk) => {
1529                                let row = chunk.address.v as usize;
1530                                if row >= self.gpu_cache_debug_chunks.len() {
1531                                    self.gpu_cache_debug_chunks.resize(row + 1, Vec::new());
1532                                }
1533                                self.gpu_cache_debug_chunks[row].push(chunk);
1534                            },
1535                            GpuCacheDebugCmd::Free(address) => {
1536                                let chunks = &mut self.gpu_cache_debug_chunks[address.v as usize];
1537                                let pos = chunks.iter()
1538                                    .position(|x| x.address == address).unwrap();
1539                                chunks.remove(pos);
1540                            },
1541                        }
1542                    }
1543                    self.pending_gpu_cache_updates.push(list);
1544                }
1545                ResultMsg::UpdateResources {
1546                    resource_updates,
1547                    memory_pressure,
1548                } => {
1549                    if memory_pressure {
1550                        // If a memory pressure event arrives _after_ a new scene has
1551                        // been published that writes persistent targets (i.e. cached
1552                        // render tasks to the texture cache, or picture cache tiles)
1553                        // but _before_ the next update/render loop, those targets
1554                        // will not be updated due to the active_documents list being
1555                        // cleared at the end of this message. To work around that,
1556                        // if any of the existing documents have not rendered yet, and
1557                        // have picture/texture cache targets, force a render so that
1558                        // those targets are updated.
1559                        let active_documents = mem::replace(
1560                            &mut self.active_documents,
1561                            FastHashMap::default(),
1562                        );
1563                        for (doc_id, mut doc) in active_documents {
1564                            if doc.frame.must_be_drawn() {
1565                                // As this render will not be presented, we must pass None to
1566                                // render_impl. This avoids interfering with partial present
1567                                // logic, as well as being more efficient.
1568                                self.render_impl(
1569                                    doc_id,
1570                                    &mut doc,
1571                                    None,
1572                                    0,
1573                                ).ok();
1574                            }
1575                        }
1576                    }
1577
1578                    self.pending_texture_cache_updates |= !resource_updates.texture_updates.updates.is_empty();
1579                    self.pending_texture_updates.push(resource_updates.texture_updates);
1580                    self.pending_native_surface_updates.extend(resource_updates.native_surface_updates);
1581                    self.device.begin_frame();
1582
1583                    self.update_texture_cache();
1584                    self.update_native_surfaces();
1585
1586                    // Flush the render target pool on memory pressure.
1587                    //
1588                    // This needs to be separate from the block below because
1589                    // the device module asserts if we delete textures while
1590                    // not in a frame.
1591                    if memory_pressure {
1592                        self.texture_upload_pbo_pool.on_memory_pressure(&mut self.device);
1593                        self.staging_texture_pool.delete_textures(&mut self.device);
1594                    }
1595
1596                    self.device.end_frame();
1597                }
1598                ResultMsg::AppendNotificationRequests(mut notifications) => {
1599                    // We need to know specifically if there are any pending
1600                    // TextureCacheUpdate updates in any of the entries in
1601                    // pending_texture_updates. They may simply be nops, which do not
1602                    // need to prevent issuing the notification, and if so, may not
1603                    // cause a timely frame render to occur to wake up any listeners.
1604                    if !self.pending_texture_cache_updates {
1605                        drain_filter(
1606                            &mut notifications,
1607                            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
1608                            |n| { n.notify(); },
1609                        );
1610                    }
1611                    self.notifications.append(&mut notifications);
1612                }
1613                ResultMsg::ForceRedraw => {
1614                    self.force_redraw = true;
1615                }
1616                ResultMsg::RefreshShader(path) => {
1617                    self.pending_shader_updates.push(path);
1618                }
1619                ResultMsg::DebugOutput(output) => match output {
1620                    #[cfg(feature = "capture")]
1621                    DebugOutput::SaveCapture(config, deferred) => {
1622                        self.save_capture(config, deferred);
1623                    }
1624                    #[cfg(feature = "replay")]
1625                    DebugOutput::LoadCapture(config, plain_externals) => {
1626                        self.active_documents.clear();
1627                        self.load_capture(config, plain_externals);
1628                    }
1629                },
1630                ResultMsg::DebugCommand(command) => {
1631                    self.handle_debug_command(command);
1632                }
1633            }
1634        }
1635    }
1636
1637    fn handle_debug_command(&mut self, command: DebugCommand) {
1638        match command {
1639            DebugCommand::EnableDualSourceBlending(_) |
1640            DebugCommand::SetPictureTileSize(_) => {
1641                panic!("Should be handled by render backend");
1642            }
1643            DebugCommand::SaveCapture(..) |
1644            DebugCommand::LoadCapture(..) |
1645            DebugCommand::StartCaptureSequence(..) |
1646            DebugCommand::StopCaptureSequence => {
1647                panic!("Capture commands are not welcome here! Did you build with 'capture' feature?")
1648            }
1649            DebugCommand::ClearCaches(_)
1650            | DebugCommand::SimulateLongSceneBuild(_)
1651            | DebugCommand::EnableNativeCompositor(_)
1652            | DebugCommand::SetBatchingLookback(_)
1653            | DebugCommand::EnableMultithreading(_) => {}
1654            DebugCommand::InvalidateGpuCache => {
1655                self.gpu_cache_texture.invalidate();
1656            }
1657            DebugCommand::SetFlags(flags) => {
1658                self.set_debug_flags(flags);
1659            }
1660        }
1661    }
1662
    /// Set a callback for handling external images.
    ///
    /// The handler is stored and later used when resolving external images
    /// during frame rendering (see `unlock_external_images`). Replaces any
    /// previously installed handler.
    pub fn set_external_image_handler(&mut self, handler: Box<dyn ExternalImageHandler>) {
        self.external_image_handler = Some(handler);
    }
1667
1668    /// Retrieve (and clear) the current list of recorded frame profiles.
1669    pub fn get_frame_profiles(&mut self) -> (Vec<CpuProfile>, Vec<GpuProfile>) {
1670        let cpu_profiles = self.cpu_profiles.drain(..).collect();
1671        let gpu_profiles = self.gpu_profiles.drain(..).collect();
1672        (cpu_profiles, gpu_profiles)
1673    }
1674
    /// Reset the current partial present state. This forces the entire framebuffer
    /// to be refreshed next time `render` is called.
    pub fn force_redraw(&mut self) {
        // The flag is consumed by the next composite, which then ignores any
        // partial-present damage tracking and redraws the full frame.
        self.force_redraw = true;
    }
1680
1681    /// Renders the current frame.
1682    ///
1683    /// A Frame is supplied by calling [`generate_frame()`][webrender_api::Transaction::generate_frame].
1684    /// buffer_age is the age of the current backbuffer. It is only relevant if partial present
1685    /// is active, otherwise 0 should be passed here.
1686    pub fn render(
1687        &mut self,
1688        device_size: DeviceIntSize,
1689        buffer_age: usize,
1690    ) -> Result<RenderResults, Vec<RendererError>> {
1691        self.device_size = Some(device_size);
1692
1693        // TODO(gw): We want to make the active document that is
1694        //           being rendered configurable via the public
1695        //           API in future. For now, just select the last
1696        //           added document as the active one to render
1697        //           (Gecko only ever creates a single document
1698        //           per renderer right now).
1699        let doc_id = self.active_documents.keys().last().cloned();
1700
1701        let result = match doc_id {
1702            Some(doc_id) => {
1703                // Remove the doc from the map to appease the borrow checker
1704                let mut doc = self.active_documents
1705                    .remove(&doc_id)
1706                    .unwrap();
1707
1708                let result = self.render_impl(
1709                    doc_id,
1710                    &mut doc,
1711                    Some(device_size),
1712                    buffer_age,
1713                );
1714
1715                self.active_documents.insert(doc_id, doc);
1716
1717                result
1718            }
1719            None => {
1720                self.last_time = precise_time_ns();
1721                Ok(RenderResults::default())
1722            }
1723        };
1724
1725        drain_filter(
1726            &mut self.notifications,
1727            |n| { n.when() == Checkpoint::FrameRendered },
1728            |n| { n.notify(); },
1729        );
1730
1731        // This is the end of the rendering pipeline. If some notifications are is still there,
1732        // just clear them and they will autimatically fire the Checkpoint::TransactionDropped
1733        // event. Otherwise they would just pile up in this vector forever.
1734        self.notifications.clear();
1735
1736        tracy_frame_marker!();
1737
1738        result
1739    }
1740
    /// Update the state of any debug / profiler overlays. This is currently only needed
    /// when running with the native compositor enabled.
    ///
    /// Decides whether the overlay is needed this frame, then (in native
    /// compositor mode only) destroys a stale overlay surface and/or creates
    /// a fresh one sized to the framebuffer.
    fn update_debug_overlay(
        &mut self,
        framebuffer_size: DeviceIntSize,
        has_debug_items: bool,
    ) {
        // If any of the following debug flags are set, something will be drawn on the debug overlay.
        self.debug_overlay_state.is_enabled = has_debug_items || self.debug_flags.intersects(
            DebugFlags::PROFILER_DBG |
            DebugFlags::RENDER_TARGET_DBG |
            DebugFlags::TEXTURE_CACHE_DBG |
            DebugFlags::EPOCHS |
            DebugFlags::GPU_CACHE_DBG |
            DebugFlags::PICTURE_CACHING_DBG |
            DebugFlags::PRIMITIVE_DBG |
            DebugFlags::ZOOM_DBG
        );

        // Update the debug overlay surface, if we are running in native compositor mode.
        if let CompositorKind::Native { .. } = self.current_compositor_kind {
            let compositor = self.compositor_config.compositor().unwrap();

            // If there is a current surface, destroy it if we don't need it for this frame, or if
            // the size has changed. Destruction must happen before any re-creation below.
            if let Some(current_size) = self.debug_overlay_state.current_size {
                if !self.debug_overlay_state.is_enabled || current_size != framebuffer_size {
                    compositor.destroy_surface(NativeSurfaceId::DEBUG_OVERLAY);
                    self.debug_overlay_state.current_size = None;
                }
            }

            // Allocate a new surface, if we need it and there isn't one.
            if self.debug_overlay_state.is_enabled && self.debug_overlay_state.current_size.is_none() {
                compositor.create_surface(
                    NativeSurfaceId::DEBUG_OVERLAY,
                    DeviceIntPoint::zero(),
                    framebuffer_size,
                    false, // NOTE(review): presumably an is_opaque flag (overlay needs alpha) — confirm against Compositor::create_surface
                );
                compositor.create_tile(
                    NativeTileId::DEBUG_OVERLAY,
                );
                self.debug_overlay_state.current_size = Some(framebuffer_size);
            }
        }
    }
1788
1789    /// Bind a draw target for the debug / profiler overlays, if required.
1790    fn bind_debug_overlay(&mut self, device_size: DeviceIntSize) -> Option<DrawTarget> {
1791        // Debug overlay setup are only required in native compositing mode
1792        if self.debug_overlay_state.is_enabled {
1793            if let CompositorKind::Native { .. } = self.current_compositor_kind {
1794                let compositor = self.compositor_config.compositor().unwrap();
1795                let surface_size = self.debug_overlay_state.current_size.unwrap();
1796
1797                // Ensure old surface is invalidated before binding
1798                compositor.invalidate_tile(
1799                    NativeTileId::DEBUG_OVERLAY,
1800                    DeviceIntRect::from_size(surface_size),
1801                );
1802                // Bind the native surface
1803                let surface_info = compositor.bind(
1804                    NativeTileId::DEBUG_OVERLAY,
1805                    DeviceIntRect::from_size(surface_size),
1806                    DeviceIntRect::from_size(surface_size),
1807                );
1808
1809                // Bind the native surface to current FBO target
1810                let draw_target = DrawTarget::NativeSurface {
1811                    offset: surface_info.origin,
1812                    external_fbo_id: surface_info.fbo_id,
1813                    dimensions: surface_size,
1814                };
1815                self.device.bind_draw_target(draw_target);
1816
1817                // When native compositing, clear the debug overlay each frame.
1818                self.device.clear_target(
1819                    Some([0.0, 0.0, 0.0, 0.0]),
1820                    None, // debug renderer does not use depth
1821                    None,
1822                );
1823
1824                Some(draw_target)
1825            } else {
1826                // If we're not using the native compositor, then the default
1827                // frame buffer is already bound. Create a DrawTarget for it and
1828                // return it.
1829                Some(DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left()))
1830            }
1831        } else {
1832            None
1833        }
1834    }
1835
1836    /// Unbind the draw target for debug / profiler overlays, if required.
1837    fn unbind_debug_overlay(&mut self) {
1838        // Debug overlay setup are only required in native compositing mode
1839        if self.debug_overlay_state.is_enabled {
1840            if let CompositorKind::Native { .. } = self.current_compositor_kind {
1841                let compositor = self.compositor_config.compositor().unwrap();
1842                // Unbind the draw target and add it to the visual tree to be composited
1843                compositor.unbind();
1844
1845                compositor.add_surface(
1846                    NativeSurfaceId::DEBUG_OVERLAY,
1847                    CompositorSurfaceTransform::identity(),
1848                    DeviceIntRect::from_size(
1849                        self.debug_overlay_state.current_size.unwrap(),
1850                    ),
1851                    ImageRendering::Auto,
1852                );
1853            }
1854        }
1855    }
1856
    // If device_size is None, don't render to the main frame buffer. This is useful to
    // update texture cache render tasks but avoid doing a full frame render. If the
    // render is not going to be presented, then this must be set to None, as performing a
    // composite without a present will confuse partial present.
    //
    // This is the heart of the renderer: it pairs device begin/end frame,
    // handles compositor-mode switches, uploads GPU cache / texture cache
    // updates, draws the frame and the debug overlays, and harvests profiler
    // counters. Statement order here is load-bearing.
    fn render_impl(
        &mut self,
        doc_id: DocumentId,
        active_doc: &mut RenderedDocument,
        device_size: Option<DeviceIntSize>,
        buffer_age: usize,
    ) -> Result<RenderResults, Vec<RendererError>> {
        profile_scope!("render");
        let mut results = RenderResults::default();
        self.profile.start_time(profiler::RENDERER_TIME);

        self.staging_texture_pool.begin_frame();

        let compositor_kind = active_doc.frame.composite_state.compositor_kind;
        // If the compositor kind the frame was built for differs from the one
        // we are currently using, switch the native compositor on/off and tear
        // down any debug overlay surface owned by the previous mode.
        if self.current_compositor_kind != compositor_kind {
            let enable = match (self.current_compositor_kind, compositor_kind) {
                (CompositorKind::Native { .. }, CompositorKind::Draw { .. }) => {
                    // Leaving native mode: the overlay surface (if any) belongs
                    // to the native compositor and must be destroyed now.
                    if self.debug_overlay_state.current_size.is_some() {
                        self.compositor_config
                            .compositor()
                            .unwrap()
                            .destroy_surface(NativeSurfaceId::DEBUG_OVERLAY);
                        self.debug_overlay_state.current_size = None;
                    }
                    false
                }
                (CompositorKind::Draw { .. }, CompositorKind::Native { .. }) => {
                    true
                }
                (current_compositor_kind, active_doc_compositor_kind) => {
                    warn!("Compositor mismatch, assuming this is Wrench running. Current {:?}, active {:?}",
                        current_compositor_kind, active_doc_compositor_kind);
                    false
                }
            };

            if let Some(config) = self.compositor_config.compositor() {
                config.enable_native_compositor(enable);
            }
            self.current_compositor_kind = compositor_kind;
        }

        // The texture resolver scope should be outside of any rendering, including
        // debug rendering. This ensures that when we return render targets to the
        // pool via glInvalidateFramebuffer, we don't do any debug rendering after
        // that point. Otherwise, the bind / invalidate / bind logic trips up the
        // render pass logic in tiled / mobile GPUs, resulting in an extra copy /
        // resolve step when the debug overlay is enabled.
        self.texture_resolver.begin_frame();

        if let Some(device_size) = device_size {
            self.update_gpu_profile(device_size);
        }

        let cpu_frame_id = {
            let _gm = self.gpu_profiler.start_marker("begin frame");
            let frame_id = self.device.begin_frame();
            self.gpu_profiler.begin_frame(frame_id);

            // Reset GL state to a known baseline before drawing anything.
            self.device.disable_scissor();
            self.device.disable_depth();
            self.set_blend(false, FramebufferKind::Main);
            //self.update_shaders();

            self.update_texture_cache();
            self.update_native_surfaces();

            frame_id
        };

        if let Some(device_size) = device_size {
            // Inform the client that we are starting a composition transaction if native
            // compositing is enabled. This needs to be done early in the frame, so that
            // we can create debug overlays after drawing the main surfaces.
            if let CompositorKind::Native { .. } = self.current_compositor_kind {
                let compositor = self.compositor_config.compositor().unwrap();
                compositor.begin_frame();
            }

            // Update the state of the debug overlay surface, ensuring that
            // the compositor mode has a suitable surface to draw to, if required.
            self.update_debug_overlay(device_size, !active_doc.frame.debug_items.is_empty());
        }

        let frame = &mut active_doc.frame;
        let profile = &mut active_doc.profile;
        assert!(self.current_compositor_kind == frame.composite_state.compositor_kind);

        if self.shared_texture_cache_cleared {
            assert!(self.documents_seen.contains(&doc_id),
                    "Cleared texture cache without sending new document frame.");
        }

        match self.prepare_gpu_cache(&frame.deferred_resolves) {
            Ok(..) => {
                assert!(frame.gpu_cache_frame_id <= self.gpu_cache_frame_id,
                    "Received frame depends on a later GPU cache epoch ({:?}) than one we received last via `UpdateGpuCache` ({:?})",
                    frame.gpu_cache_frame_id, self.gpu_cache_frame_id);

                {
                    profile_scope!("gl.flush");
                    self.device.gl().flush();  // early start on gpu cache updates
                }

                self.draw_frame(
                    frame,
                    device_size,
                    buffer_age,
                    &mut results,
                );

                // TODO(nical): do this automatically by selecting counters in the wr profiler
                // Profile marker for the number of invalidated picture cache
                if thread_is_being_profiled() {
                    let duration = Duration::new(0,0);
                    if let Some(n) = self.profile.get(profiler::RENDERED_PICTURE_TILES) {
                        let message = (n as usize).to_string();
                        add_text_marker(cstr!("NumPictureCacheInvalidated"), &message, duration);
                    }
                }

                // Debug item overlays only make sense when presenting to a framebuffer.
                if device_size.is_some() {
                    self.draw_frame_debug_items(&frame.debug_items);
                }

                self.profile.merge(profile);
            }
            Err(e) => {
                self.renderer_errors.push(e);
            }
        }

        self.unlock_external_images(&frame.deferred_resolves);

        let _gm = self.gpu_profiler.start_marker("end frame");
        self.gpu_profiler.end_frame();

        let debug_overlay = device_size.and_then(|device_size| {
            // Bind a surface to draw the debug / profiler information to.
            self.bind_debug_overlay(device_size).map(|draw_target| {
                self.draw_render_target_debug(&draw_target);
                self.draw_texture_cache_debug(&draw_target);
                self.draw_gpu_cache_debug(device_size);
                self.draw_zoom_debug(device_size);
                self.draw_epoch_debug();
                draw_target
            })
        });

        self.profile.end_time(profiler::RENDERER_TIME);
        self.profile.end_time_if_started(profiler::TOTAL_FRAME_CPU_TIME);

        let current_time = precise_time_ns();
        if device_size.is_some() {
            let time = profiler::ns_to_ms(current_time - self.last_time);
            self.profile.set(profiler::FRAME_TIME, time);
        }

        // Maintain a bounded history of CPU profiles for the embedder to query.
        if self.max_recorded_profiles > 0 {
            while self.cpu_profiles.len() >= self.max_recorded_profiles {
                self.cpu_profiles.pop_front();
            }
            let cpu_profile = CpuProfile::new(
                cpu_frame_id,
                (self.profile.get_or(profiler::FRAME_BUILDING_TIME, 0.0) * 1000000.0) as u64,
                (self.profile.get_or(profiler::RENDERER_TIME, 0.0) * 1000000.0) as u64,
                self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize,
            );
            self.cpu_profiles.push_back(cpu_profile);
        }

        if thread_is_being_profiled() {
            let duration = Duration::new(0,0);
            let message = (self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize).to_string();
            add_text_marker(cstr!("NumDrawCalls"), &message, duration);
        }

        // Publish memory usage of the various texture pools to the profiler.
        let report = self.texture_resolver.report_memory();
        self.profile.set(profiler::RENDER_TARGET_MEM, profiler::bytes_to_mb(report.render_target_textures));
        self.profile.set(profiler::PICTURE_TILES_MEM, profiler::bytes_to_mb(report.picture_tile_textures));
        self.profile.set(profiler::ATLAS_TEXTURES_MEM, profiler::bytes_to_mb(report.atlas_textures));
        self.profile.set(profiler::STANDALONE_TEXTURES_MEM, profiler::bytes_to_mb(report.standalone_textures));

        self.profile.set(profiler::DEPTH_TARGETS_MEM, profiler::bytes_to_mb(self.device.depth_targets_memory()));

        results.stats.texture_upload_mb = self.profile.get_or(profiler::TEXTURE_UPLOADS_MEM, 0.0);
        self.frame_counter += 1;
        results.stats.resource_upload_time = self.resource_upload_time;
        self.resource_upload_time = 0.0;
        results.stats.gpu_cache_upload_time = self.gpu_cache_upload_time;
        self.gpu_cache_upload_time = 0.0;

        if let Some(stats) = active_doc.frame_stats.take() {
          // Copy the full frame stats to RendererStats
          results.stats.merge(&stats);

          self.profiler.update_frame_stats(stats);
        }

        self.texture_resolver.update_profile(&mut self.profile);

        // Note: this clears the values in self.profile.
        self.profiler.set_counters(&mut self.profile);

        // Note: profile counters must be set before this or they will count for next frame.
        self.profiler.update();

        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
            if let Some(device_size) = device_size {
                //TODO: take device/pixel ratio into equation?
                if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
                    self.profiler.draw_profile(
                        self.frame_counter,
                        debug_renderer,
                        device_size,
                    );
                }
            }
        }

        if self.debug_flags.contains(DebugFlags::ECHO_DRIVER_MESSAGES) {
            self.device.echo_driver_messages();
        }

        if let Some(debug_renderer) = self.debug.try_get_mut() {
            let small_screen = self.debug_flags.contains(DebugFlags::SMALL_SCREEN);
            let scale = if small_screen { 1.6 } else { 1.0 };
            // TODO(gw): Tidy this up so that compositor config integrates better
            //           with the (non-compositor) surface y-flip options.
            let surface_origin_is_top_left = match self.current_compositor_kind {
                CompositorKind::Native { .. } => true,
                CompositorKind::Draw { .. } => self.device.surface_origin_is_top_left(),
            };
            // If there is a debug overlay, render it. Otherwise, just clear
            // the debug renderer.
            debug_renderer.render(
                &mut self.device,
                debug_overlay.and(device_size),
                scale,
                surface_origin_is_top_left,
            );
        }

        self.staging_texture_pool.end_frame(&mut self.device);
        self.texture_upload_pbo_pool.end_frame(&mut self.device);
        self.device.end_frame();

        if debug_overlay.is_some() {
            self.last_time = current_time;

            // Unbind the target for the debug overlay. No debug or profiler drawing
            // can occur after this point.
            self.unbind_debug_overlay();
        }

        if device_size.is_some() {
            // Inform the client that we are finished this composition transaction if native
            // compositing is enabled. This must be called after any debug / profiling compositor
            // surfaces have been drawn and added to the visual tree.
            if let CompositorKind::Native { .. } = self.current_compositor_kind {
                profile_scope!("compositor.end_frame");
                let compositor = self.compositor_config.compositor().unwrap();
                compositor.end_frame();
            }
        }

        self.documents_seen.clear();
        self.shared_texture_cache_cleared = false;

        // Hand any accumulated errors back to the caller, clearing them for the
        // next frame.
        if self.renderer_errors.is_empty() {
            Ok(results)
        } else {
            Err(mem::replace(&mut self.renderer_errors, Vec::new()))
        }
    }
2137
2138    fn update_gpu_profile(&mut self, device_size: DeviceIntSize) {
2139        let _gm = self.gpu_profiler.start_marker("build samples");
2140        // Block CPU waiting for last frame's GPU profiles to arrive.
2141        // In general this shouldn't block unless heavily GPU limited.
2142        let (gpu_frame_id, timers, samplers) = self.gpu_profiler.build_samples();
2143
2144        if self.max_recorded_profiles > 0 {
2145            while self.gpu_profiles.len() >= self.max_recorded_profiles {
2146                self.gpu_profiles.pop_front();
2147            }
2148
2149            self.gpu_profiles.push_back(GpuProfile::new(gpu_frame_id, &timers));
2150        }
2151
2152        self.profiler.set_gpu_time_queries(timers);
2153
2154        if !samplers.is_empty() {
2155            let screen_fraction = 1.0 / device_size.to_f32().area();
2156
2157            fn accumulate_sampler_value(description: &str, samplers: &[GpuSampler]) -> f32 {
2158                let mut accum = 0.0;
2159                for sampler in samplers {
2160                    if sampler.tag.label != description {
2161                        continue;
2162                    }
2163
2164                    accum += sampler.count as f32;
2165                }
2166
2167                accum
2168            }
2169
2170            let alpha_targets = accumulate_sampler_value(&"Alpha targets", &samplers) * screen_fraction;
2171            let transparent_pass = accumulate_sampler_value(&"Transparent pass", &samplers) * screen_fraction;
2172            let opaque_pass = accumulate_sampler_value(&"Opaque pass", &samplers) * screen_fraction;
2173            self.profile.set(profiler::ALPHA_TARGETS_SAMPLERS, alpha_targets);
2174            self.profile.set(profiler::TRANSPARENT_PASS_SAMPLERS, transparent_pass);
2175            self.profile.set(profiler::OPAQUE_PASS_SAMPLERS, opaque_pass);
2176            self.profile.set(profiler::TOTAL_SAMPLERS, alpha_targets + transparent_pass + opaque_pass);
2177        }
2178    }
2179
    /// Applies all pending texture cache update lists received from the render
    /// backend: deletes/reallocates cache textures (reusing a freed texture
    /// when its allocation info exactly matches a new request) and then
    /// uploads the new pixel data. Create/delete times are fed to the profiler.
    fn update_texture_cache(&mut self) {
        profile_scope!("update_texture_cache");

        let _gm = self.gpu_profiler.start_marker("texture cache update");
        // Take ownership of the queued update lists so we can drain them below.
        let mut pending_texture_updates = mem::replace(&mut self.pending_texture_updates, vec![]);
        self.pending_texture_cache_updates = false;

        self.profile.start_time(profiler::TEXTURE_CACHE_UPDATE_TIME);

        // Accumulated ns spent creating / deleting GL textures this call.
        let mut create_cache_texture_time = 0;
        let mut delete_cache_texture_time = 0;

        for update_list in pending_texture_updates.drain(..) {
            // Find any textures that will need to be deleted in this group of allocations.
            let mut pending_deletes = Vec::new();
            for allocation in &update_list.allocations {
                let old = self.texture_resolver.texture_cache_map.remove(&allocation.id);
                // The backend must agree with us about which ids are live:
                // Alloc must not collide with an existing texture, while
                // Reset/Free must refer to one.
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(_) => {
                        assert!(old.is_none(), "Renderer and backend disagree!");
                    }
                    TextureCacheAllocationKind::Reset(_) |
                    TextureCacheAllocationKind::Free => {
                        assert!(old.is_some(), "Renderer and backend disagree!");
                    }
                }
                if let Some(old) = old {

                    // Regenerate the cache allocation info so we can search through deletes for reuse.
                    let size = old.texture.get_dimensions();
                    let info = TextureCacheAllocInfo {
                        width: size.width,
                        height: size.height,
                        format: old.texture.get_format(),
                        filter: old.texture.get_filter(),
                        target: old.texture.get_target(),
                        is_shared_cache: old.texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE),
                        has_depth: old.texture.supports_depth(),
                        category: old.category,
                    };
                    pending_deletes.push((old.texture, info));
                }
            }
            // Look for any alloc or reset that has matching alloc info and save it from being deleted.
            // reused_textures[i] corresponds to the i-th Alloc/Reset in the
            // list and holds Some(texture) when a matching delete was found.
            let mut reused_textures = VecDeque::with_capacity(pending_deletes.len());
            for allocation in &update_list.allocations {
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(ref info) |
                    TextureCacheAllocationKind::Reset(ref info) => {
                        reused_textures.push_back(
                            pending_deletes.iter()
                                .position(|(_, old_info)| *old_info == *info)
                                .map(|index| pending_deletes.swap_remove(index).0)
                        );
                    }
                    TextureCacheAllocationKind::Free => {}
                }
            }
            // Now that we've saved as many deletions for reuse as we can, actually delete whatever is left.
            if !pending_deletes.is_empty() {
                let delete_texture_start = precise_time_ns();
                for (texture, _) in pending_deletes {
                    add_event_marker(c_str!("TextureCacheFree"));
                    self.device.delete_texture(texture);
                }
                delete_cache_texture_time += precise_time_ns() - delete_texture_start;
            }

            for allocation in update_list.allocations {
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(_) => add_event_marker(c_str!("TextureCacheAlloc")),
                    TextureCacheAllocationKind::Reset(_) => add_event_marker(c_str!("TextureCacheReset")),
                    TextureCacheAllocationKind::Free => {}
                };
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(ref info) |
                    TextureCacheAllocationKind::Reset(ref info) => {
                        let create_cache_texture_start = precise_time_ns();
                        // Create a new native texture, as requested by the texture cache.
                        // If we managed to reuse a deleted texture, then prefer that instead.
                        //
                        // Ensure no PBO is bound when creating the texture storage,
                        // or GL will attempt to read data from there.
                        let mut texture = reused_textures.pop_front().unwrap_or(None).unwrap_or_else(|| {
                            self.device.create_texture(
                                info.target,
                                info.format,
                                info.width,
                                info.height,
                                info.filter,
                                // This needs to be a render target because some render
                                // tasks get rendered into the texture cache.
                                Some(RenderTargetInfo { has_depth: info.has_depth }),
                            )
                        });

                        if info.is_shared_cache {
                            texture.flags_mut()
                                .insert(TextureFlags::IS_SHARED_TEXTURE_CACHE);

                            // On Mali-Gxx devices we use batched texture uploads as it performs much better.
                            // However, due to another driver bug we must ensure the textures are fully cleared,
                            // otherwise we get visual artefacts when blitting to the texture cache.
                            if self.device.use_batched_texture_uploads() &&
                                !self.device.get_capabilities().supports_render_target_partial_update
                            {
                                self.clear_texture(&texture, [0.0; 4]);
                            }

                            // Textures in the cache generally don't need to be cleared,
                            // but we do so if the debug display is active to make it
                            // easier to identify unallocated regions.
                            if self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
                                self.clear_texture(&texture, TEXTURE_CACHE_DBG_CLEAR_COLOR);
                            }
                        }

                        create_cache_texture_time += precise_time_ns() - create_cache_texture_start;

                        // Register the (new or reused) texture under its cache id.
                        self.texture_resolver.texture_cache_map.insert(allocation.id, CacheTexture {
                            texture,
                            category: info.category,
                        });
                    }
                    TextureCacheAllocationKind::Free => {}
                };
            }

            // With allocations settled, upload this list's pixel data.
            upload_to_texture_cache(self, update_list.updates);
        }

        if create_cache_texture_time > 0 {
            self.profile.set(
                profiler::CREATE_CACHE_TEXTURE_TIME,
                profiler::ns_to_ms(create_cache_texture_time)
            );
        }
        if delete_cache_texture_time > 0 {
            self.profile.set(
                profiler::DELETE_CACHE_TEXTURE_TIME,
                profiler::ns_to_ms(delete_cache_texture_time)
            )
        }

        let t = self.profile.end_time(profiler::TEXTURE_CACHE_UPDATE_TIME);
        self.resource_upload_time += t;

        // Fire any notifications that were waiting on texture updates.
        drain_filter(
            &mut self.notifications,
            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
            |n| { n.notify(); },
        );
    }
2333
2334    fn bind_textures(&mut self, textures: &BatchTextures) {
2335        for i in 0 .. 3 {
2336            self.texture_resolver.bind(
2337                &textures.input.colors[i],
2338                TextureSampler::color(i),
2339                &mut self.device,
2340            );
2341        }
2342
2343        self.texture_resolver.bind(
2344            &textures.clip_mask,
2345            TextureSampler::ClipMask,
2346            &mut self.device,
2347        );
2348
2349        // TODO: this probably isn't the best place for this.
2350        if let Some(ref texture) = self.dither_matrix_texture {
2351            self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
2352        }
2353    }
2354
2355    fn draw_instanced_batch<T: Clone>(
2356        &mut self,
2357        data: &[T],
2358        vertex_array_kind: VertexArrayKind,
2359        textures: &BatchTextures,
2360        stats: &mut RendererStats,
2361    ) {
2362        self.bind_textures(textures);
2363
2364        // If we end up with an empty draw call here, that means we have
2365        // probably introduced unnecessary batch breaks during frame
2366        // building - so we should be catching this earlier and removing
2367        // the batch.
2368        debug_assert!(!data.is_empty());
2369
2370        let vao = &self.vaos[vertex_array_kind];
2371        self.device.bind_vao(vao);
2372
2373        let chunk_size = if self.debug_flags.contains(DebugFlags::DISABLE_BATCHING) {
2374            1
2375        } else if vertex_array_kind == VertexArrayKind::Primitive {
2376            self.max_primitive_instance_count
2377        } else {
2378            data.len()
2379        };
2380
2381        for chunk in data.chunks(chunk_size) {
2382            if self.enable_instancing {
2383                self.device
2384                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, None);
2385                self.device
2386                    .draw_indexed_triangles_instanced_u16(6, chunk.len() as i32);
2387            } else {
2388                self.device
2389                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, NonZeroUsize::new(4));
2390                self.device
2391                    .draw_indexed_triangles(6 * chunk.len() as i32);
2392            }
2393            self.profile.inc(profiler::DRAW_CALLS);
2394            stats.total_draw_calls += 1;
2395        }
2396
2397        self.profile.add(profiler::VERTICES, 6 * data.len());
2398    }
2399
    /// Performs the backdrop readback required by a mix-blend composite: blits
    /// the portion of `backdrop`'s rendered output that the composite will
    /// read into `readback`'s target texture, clipped to whatever part of the
    /// wanted rect is actually available on the backdrop surface. Scissor is
    /// temporarily disabled around the blit and the draw target is restored
    /// before returning.
    fn handle_readback_composite(
        &mut self,
        draw_target: DrawTarget,
        uses_scissor: bool,
        backdrop: &RenderTask,
        readback: &RenderTask,
    ) {
        // Extract the rectangle in the backdrop surface's device space of where
        // we need to read from.
        let readback_origin = match readback.kind {
            RenderTaskKind::Readback(ReadbackTask { readback_origin: Some(o), .. }) => o,
            RenderTaskKind::Readback(ReadbackTask { readback_origin: None, .. }) => {
                // If this is a dummy readback, just early out. We know that the
                // clear of the target will ensure the task rect is already zero alpha,
                // so it won't affect the rendering output.
                return;
            }
            // Caller guarantees `readback` is a Readback task.
            _ => unreachable!(),
        };

        // Scissor would clip the blit below, so turn it off for the duration.
        if uses_scissor {
            self.device.disable_scissor();
        }

        // Resolve the readback task's target texture - this is where the
        // backdrop pixels will be copied to.
        let texture_source = TextureSource::TextureCache(
            readback.get_target_texture(),
            Swizzle::default(),
        );
        let (cache_texture, _) = self.texture_resolver
            .resolve(&texture_source).expect("bug: no source texture");

        // Before submitting the composite batch, do the
        // framebuffer readbacks that are needed for each
        // composite operation in this batch.
        let readback_rect = readback.get_target_rect();
        let backdrop_rect = backdrop.get_target_rect();
        let (backdrop_screen_origin, _) = match backdrop.kind {
            RenderTaskKind::Picture(ref task_info) => (task_info.content_origin, task_info.device_pixel_scale),
            _ => panic!("bug: composite on non-picture?"),
        };

        // Bind the FBO to blit the backdrop to.
        // Called per-instance in case the FBO changes. The device will skip
        // the GL call if the requested target is already bound.
        let cache_draw_target = DrawTarget::from_texture(
            cache_texture,
            false,
        );

        // Get the rect that we ideally want, in space of the parent surface
        let wanted_rect = DeviceRect::from_origin_and_size(
            readback_origin,
            readback_rect.size().to_f32(),
        );

        // Get the rect that is available on the parent surface. It may be smaller
        // than desired because this is a picture cache tile covering only part of
        // the wanted rect and/or because the parent surface was clipped.
        let avail_rect = DeviceRect::from_origin_and_size(
            backdrop_screen_origin,
            backdrop_rect.size().to_f32(),
        );

        if let Some(int_rect) = wanted_rect.intersection(&avail_rect) {
            // If there is a valid intersection, work out the correct origins and
            // sizes of the copy rects, and do the blit.
            let copy_size = int_rect.size().to_i32();

            // Source origin: the intersection, re-expressed relative to the
            // backdrop task's rect within its target.
            let src_origin = backdrop_rect.min.to_f32() +
                int_rect.min.to_vector() -
                backdrop_screen_origin.to_vector();

            let src = DeviceIntRect::from_origin_and_size(
                src_origin.to_i32(),
                copy_size,
            );

            // Destination origin: the intersection, re-expressed relative to
            // the readback task's rect within its target.
            let dest_origin = readback_rect.min.to_f32() +
                int_rect.min.to_vector() -
                readback_origin.to_vector();

            let dest = DeviceIntRect::from_origin_and_size(
                dest_origin.to_i32(),
                copy_size,
            );

            // Should always be drawing to picture cache tiles or off-screen surface!
            debug_assert!(!draw_target.is_default());
            // Off-screen targets are not Y-flipped, so device space maps 1:1
            // to framebuffer space here.
            let device_to_framebuffer = Scale::new(1i32);

            self.device.blit_render_target(
                draw_target.into(),
                src * device_to_framebuffer,
                cache_draw_target,
                dest * device_to_framebuffer,
                TextureFilter::Linear,
            );
        }

        // Restore draw target to current pass render target, and reset
        // the read target.
        self.device.bind_draw_target(draw_target);
        self.device.reset_read_target();

        if uses_scissor {
            self.device.enable_scissor();
        }
    }
2508
2509    fn handle_blits(
2510        &mut self,
2511        blits: &[BlitJob],
2512        render_tasks: &RenderTaskGraph,
2513        draw_target: DrawTarget,
2514    ) {
2515        if blits.is_empty() {
2516            return;
2517        }
2518
2519        let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLIT);
2520
2521        // TODO(gw): For now, we don't bother batching these by source texture.
2522        //           If if ever shows up as an issue, we can easily batch them.
2523        for blit in blits {
2524            let (source, source_rect) = {
2525                // A blit from the child render task into this target.
2526                // TODO(gw): Support R8 format here once we start
2527                //           creating mips for alpha masks.
2528                let task = &render_tasks[blit.source];
2529                let source_rect = task.get_target_rect();
2530                let source_texture = task.get_texture_source();
2531
2532                (source_texture, source_rect)
2533            };
2534
2535            debug_assert_eq!(source_rect.size(), blit.target_rect.size());
2536            let (texture, swizzle) = self.texture_resolver
2537                .resolve(&source)
2538                .expect("BUG: invalid source texture");
2539
2540            if swizzle != Swizzle::default() {
2541                error!("Swizzle {:?} can't be handled by a blit", swizzle);
2542            }
2543
2544            let read_target = DrawTarget::from_texture(
2545                texture,
2546                false,
2547            );
2548
2549            self.device.blit_render_target(
2550                read_target.into(),
2551                read_target.to_framebuffer_rect(source_rect),
2552                draw_target,
2553                draw_target.to_framebuffer_rect(blit.target_rect),
2554                TextureFilter::Linear,
2555            );
2556        }
2557    }
2558
2559    fn handle_scaling(
2560        &mut self,
2561        scalings: &FastHashMap<TextureSource, Vec<ScalingInstance>>,
2562        projection: &default::Transform3D<f32>,
2563        stats: &mut RendererStats,
2564    ) {
2565        if scalings.is_empty() {
2566            return
2567        }
2568
2569        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SCALE);
2570
2571        for (source, instances) in scalings {
2572            let buffer_kind = source.image_buffer_kind();
2573
2574            self.shaders
2575                .borrow_mut()
2576                .get_scale_shader(buffer_kind)
2577                .bind(
2578                    &mut self.device,
2579                    &projection,
2580                    Some(self.texture_resolver.get_texture_size(source).to_f32()),
2581                    &mut self.renderer_errors,
2582                );
2583
2584            self.draw_instanced_batch(
2585                instances,
2586                VertexArrayKind::Scale,
2587                &BatchTextures::composite_rgb(*source),
2588                stats,
2589            );
2590        }
2591    }
2592
2593    fn handle_svg_filters(
2594        &mut self,
2595        textures: &BatchTextures,
2596        svg_filters: &[SvgFilterInstance],
2597        projection: &default::Transform3D<f32>,
2598        stats: &mut RendererStats,
2599    ) {
2600        if svg_filters.is_empty() {
2601            return;
2602        }
2603
2604        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SVG_FILTER);
2605
2606        self.shaders.borrow_mut().cs_svg_filter.bind(
2607            &mut self.device,
2608            &projection,
2609            None,
2610            &mut self.renderer_errors
2611        );
2612
2613        self.draw_instanced_batch(
2614            &svg_filters,
2615            VertexArrayKind::SvgFilter,
2616            textures,
2617            stats,
2618        );
2619    }
2620
    /// Draws a single picture cache tile: clears the (possibly scissored)
    /// dirty region - either with a quad draw or a hardware clear, depending
    /// on device capabilities and config - then renders the tile's alpha
    /// batch container and invalidates the depth target.
    fn draw_picture_cache_target(
        &mut self,
        target: &PictureCacheTarget,
        draw_target: DrawTarget,
        projection: &default::Transform3D<f32>,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        profile_scope!("draw_picture_cache_target");

        self.profile.inc(profiler::RENDERED_PICTURE_TILES);
        let _gm = self.gpu_profiler.start_marker("picture cache target");
        let framebuffer_kind = FramebufferKind::Other;

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
            self.device.bind_draw_target(draw_target);
            self.device.enable_depth_write();
            self.set_blend(false, framebuffer_kind);

            let clear_color = target.clear_color.map(|c| c.to_array());
            // Only honour a dirty-rect scissor when the device supports
            // partially updating a render target; otherwise clear it all.
            let scissor_rect = if self.device.get_capabilities().supports_render_target_partial_update {
                target.alpha_batch_container.task_scissor_rect
            } else {
                None
            };
            match scissor_rect {
                // If updating only a dirty rect within a picture cache target, the
                // clear must also be scissored to that dirty region.
                Some(r) if self.clear_caches_with_quads => {
                    // Clear by drawing a quad with depth always-pass, instead
                    // of a scissored clear_target call.
                    self.device.enable_depth(DepthFunction::Always);
                    // Save the draw call count so that our reftests don't get confused...
                    let old_draw_call_count = stats.total_draw_calls;
                    // With no clear color, only the depth buffer is cleared.
                    if clear_color.is_none() {
                        self.device.disable_color_write();
                    }
                    let instance = ClearInstance {
                        rect: [
                            r.min.x as f32, r.min.y as f32,
                            r.max.x as f32, r.max.y as f32,
                        ],
                        color: clear_color.unwrap_or([0.0; 4]),
                    };
                    self.shaders.borrow_mut().ps_clear.bind(
                        &mut self.device,
                        &projection,
                        None,
                        &mut self.renderer_errors,
                    );
                    self.draw_instanced_batch(
                        &[instance],
                        VertexArrayKind::Clear,
                        &BatchTextures::empty(),
                        stats,
                    );
                    if clear_color.is_none() {
                        self.device.enable_color_write();
                    }
                    stats.total_draw_calls = old_draw_call_count;
                    self.device.disable_depth();
                }
                // Otherwise clear via the device, scissored to the dirty
                // rect when one was provided.
                other => {
                    let scissor_rect = other.map(|rect| {
                        draw_target.build_scissor_rect(Some(rect))
                    });
                    self.device.clear_target(clear_color, Some(1.0), scissor_rect);
                }
            };
            self.device.disable_depth_write();
        }

        self.draw_alpha_batch_container(
            &target.alpha_batch_container,
            draw_target,
            framebuffer_kind,
            projection,
            render_tasks,
            stats,
        );

        // The depth contents are not needed once the tile is drawn.
        self.device.invalidate_depth_target();
    }
2703
    /// Draw an alpha batch container into a given draw target. This is used
    /// by both color and picture cache target kinds.
    ///
    /// Opaque batches are drawn first (front-to-back, depth write on), then
    /// alpha batches (back-to-front order as supplied, blending on), with
    /// blend-mode state changes applied lazily between batches. Scissor is
    /// enabled around the whole pass if the container has a task scissor rect.
    fn draw_alpha_batch_container(
        &mut self,
        alpha_batch_container: &AlphaBatchContainer,
        draw_target: DrawTarget,
        framebuffer_kind: FramebufferKind,
        projection: &default::Transform3D<f32>,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        let uses_scissor = alpha_batch_container.task_scissor_rect.is_some();

        if uses_scissor {
            self.device.enable_scissor();
            let scissor_rect = draw_target.build_scissor_rect(
                alpha_batch_container.task_scissor_rect,
            );
            self.device.set_scissor_rect(scissor_rect)
        }

        if !alpha_batch_container.opaque_batches.is_empty()
            && !self.debug_flags.contains(DebugFlags::DISABLE_OPAQUE_PASS) {
            let _gl = self.gpu_profiler.start_marker("opaque batches");
            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
            self.set_blend(false, framebuffer_kind);
            //Note: depth equality is needed for split planes
            self.device.enable_depth(DepthFunction::LessEqual);
            self.device.enable_depth_write();

            // Draw opaque batches front-to-back for maximum
            // z-buffer efficiency!
            for batch in alpha_batch_container
                .opaque_batches
                .iter()
                .rev()
                {
                    if should_skip_batch(&batch.key.kind, self.debug_flags) {
                        continue;
                    }

                    self.shaders.borrow_mut()
                        .get(&batch.key, batch.features, self.debug_flags, &self.device)
                        .bind(
                            &mut self.device, projection, None,
                            &mut self.renderer_errors,
                        );

                    let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
                    self.draw_instanced_batch(
                        &batch.instances,
                        VertexArrayKind::Primitive,
                        &batch.key.textures,
                        stats
                    );
                }

            self.device.disable_depth_write();
            self.gpu_profiler.finish_sampler(opaque_sampler);
        } else {
            self.device.disable_depth();
        }

        if !alpha_batch_container.alpha_batches.is_empty()
            && !self.debug_flags.contains(DebugFlags::DISABLE_ALPHA_PASS) {
            let _gl = self.gpu_profiler.start_marker("alpha batches");
            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
            self.set_blend(true, framebuffer_kind);

            // Track the currently-set blend mode so we only change GL blend
            // state when consecutive batches actually differ.
            let mut prev_blend_mode = BlendMode::None;
            // Clone the shader cache handle so we can hold a borrow of it
            // across the mutable uses of `self` below.
            let shaders_rc = self.shaders.clone();

            for batch in &alpha_batch_container.alpha_batches {
                if should_skip_batch(&batch.key.kind, self.debug_flags) {
                    continue;
                }

                let mut shaders = shaders_rc.borrow_mut();
                let shader = shaders.get(
                    &batch.key,
                    batch.features | BatchFeatures::ALPHA_PASS,
                    self.debug_flags,
                    &self.device,
                );

                if batch.key.blend_mode != prev_blend_mode {
                    match batch.key.blend_mode {
                        // Overdraw debugging overrides every blend mode on
                        // the main framebuffer.
                        _ if self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) &&
                            framebuffer_kind == FramebufferKind::Main => {
                            self.device.set_blend_mode_show_overdraw();
                        }
                        BlendMode::None => {
                            unreachable!("bug: opaque blend in alpha pass");
                        }
                        BlendMode::Alpha => {
                            self.device.set_blend_mode_alpha();
                        }
                        BlendMode::PremultipliedAlpha => {
                            self.device.set_blend_mode_premultiplied_alpha();
                        }
                        BlendMode::PremultipliedDestOut => {
                            self.device.set_blend_mode_premultiplied_dest_out();
                        }
                        BlendMode::SubpixelDualSource => {
                            self.device.set_blend_mode_subpixel_dual_source();
                        }
                        BlendMode::SubpixelConstantTextColor(color) => {
                            self.device.set_blend_mode_subpixel_constant_text_color(color);
                        }
                        BlendMode::SubpixelWithBgColor => {
                            // Using the three pass "component alpha with font smoothing
                            // background color" rendering technique:
                            //
                            // /webrender/doc/text-rendering.md
                            //
                            self.device.set_blend_mode_subpixel_with_bg_color_pass0();
                            // need to make sure the shader is bound
                            shader.bind(
                                &mut self.device,
                                projection,
                                None,
                                &mut self.renderer_errors,
                            );
                            self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass0 as _);
                        }
                        BlendMode::Advanced(mode) => {
                            if self.enable_advanced_blend_barriers {
                                self.device.gl().blend_barrier_khr();
                            }
                            self.device.set_blend_mode_advanced(mode);
                        }
                        BlendMode::MultiplyDualSource => {
                            self.device.set_blend_mode_multiply_dual_source();
                        }
                        BlendMode::Screen => {
                            self.device.set_blend_mode_screen();
                        }
                        BlendMode::Exclusion => {
                            self.device.set_blend_mode_exclusion();
                        }
                    }
                    prev_blend_mode = batch.key.blend_mode;
                }

                // Handle special case readback for composites.
                if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, backdrop_id }) = batch.key.kind {
                    // composites can't be grouped together because
                    // they may overlap and affect each other.
                    debug_assert_eq!(batch.instances.len(), 1);
                    self.handle_readback_composite(
                        draw_target,
                        uses_scissor,
                        &render_tasks[task_id],
                        &render_tasks[backdrop_id],
                    );
                }

                let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
                shader.bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                );

                self.draw_instanced_batch(
                    &batch.instances,
                    VertexArrayKind::Primitive,
                    &batch.key.textures,
                    stats
                );

                if batch.key.blend_mode == BlendMode::SubpixelWithBgColor {
                    // Passes 1 and 2 of the three-pass subpixel technique:
                    // redraw the same instances with different blend state
                    // and shader color mode.
                    self.set_blend_mode_subpixel_with_bg_color_pass1(framebuffer_kind);
                    // re-binding the shader after the blend mode change
                    shader.bind(
                        &mut self.device,
                        projection,
                        None,
                        &mut self.renderer_errors,
                    );
                    self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass1 as _);

                    // When drawing the 2nd and 3rd passes, we know that the VAO, textures etc
                    // are all set up from the previous draw_instanced_batch call,
                    // so just issue a draw call here to avoid re-uploading the
                    // instances and re-binding textures etc.
                    self.device
                        .draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);

                    self.set_blend_mode_subpixel_with_bg_color_pass2(framebuffer_kind);
                    // re-binding the shader after the blend mode change
                    shader.bind(
                        &mut self.device,
                        projection,
                        None,
                        &mut self.renderer_errors,
                    );
                    self.device.switch_mode(ShaderColorMode::SubpixelWithBgColorPass2 as _);

                    self.device
                        .draw_indexed_triangles_instanced_u16(6, batch.instances.len() as i32);
                }

                if batch.key.blend_mode == BlendMode::SubpixelWithBgColor {
                    // The three-pass loop above changed the device blend state,
                    // so the cached mode no longer matches the device; force a
                    // blend-state re-setup on the next batch.
                    prev_blend_mode = BlendMode::None;
                }
            }

            self.set_blend(false, framebuffer_kind);
            self.gpu_profiler.finish_sampler(transparent_sampler);
        }

        self.device.disable_depth();
        if uses_scissor {
            self.device.disable_scissor();
        }
    }
2922
    /// Rasterize any external compositor surfaces that require updating
    ///
    /// Walks `external_surfaces` and, for each entry carrying `update_params`,
    /// binds its native compositor surface, draws one full-surface composite
    /// instance (YUV or RGB depending on the resolved color data), then
    /// unbinds. Draw-call statistics are accumulated into `results.stats`.
    fn update_external_native_surfaces(
        &mut self,
        external_surfaces: &[ResolvedExternalSurface],
        results: &mut RenderResults,
    ) {
        // Nothing resolved this frame - avoid touching GPU state at all.
        if external_surfaces.is_empty() {
            return;
        }

        let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);

        // Surface updates are plain full-rect copies: no depth test, no blending.
        self.device.disable_depth();
        self.set_blend(false, FramebufferKind::Main);

        for surface in external_surfaces {
            // See if this surface needs to be updated
            let (native_surface_id, surface_size) = match surface.update_params {
                Some(params) => params,
                None => continue,
            };

            // When updating an external surface, the entire surface rect is used
            // for all of the draw, dirty, valid and clip rect parameters.
            let surface_rect = surface_size.into();

            // Bind the native compositor surface to update
            // NOTE(review): unwrap assumes a native compositor is always
            // configured whenever external surfaces need updating — confirm.
            let surface_info = self.compositor_config
                .compositor()
                .unwrap()
                .bind(
                    NativeTileId {
                        surface_id: native_surface_id,
                        x: 0,
                        y: 0,
                    },
                    surface_rect,
                    surface_rect,
                );

            // Bind the native surface to current FBO target
            let draw_target = DrawTarget::NativeSurface {
                offset: surface_info.origin,
                external_fbo_id: surface_info.fbo_id,
                dimensions: surface_size,
            };
            self.device.bind_draw_target(draw_target);

            // Orthographic projection mapping the surface's pixel extents onto
            // the bound target.
            let projection = Transform3D::ortho(
                0.0,
                surface_size.width as f32,
                0.0,
                surface_size.height as f32,
                self.device.ortho_near_plane(),
                self.device.ortho_far_plane(),
            );

            let ( textures, instance ) = match surface.color_data {
                ResolvedExternalSurfaceColorData::Yuv{
                        ref planes, color_space, format, channel_bit_depth, .. } => {

                    // Bind an appropriate YUV shader for the texture format kind
                    self.shaders
                        .borrow_mut()
                        .get_composite_shader(
                            CompositeSurfaceFormat::Yuv,
                            surface.image_buffer_kind,
                            CompositeFeatures::empty(),
                        ).bind(
                            &mut self.device,
                            &projection,
                            None,
                            &mut self.renderer_errors
                        );

                    let textures = BatchTextures::composite_yuv(
                        planes[0].texture,
                        planes[1].texture,
                        planes[2].texture,
                    );

                    // When the texture is an external texture, the UV rect is not known when
                    // the external surface descriptor is created, because external textures
                    // are not resolved until the lock() callback is invoked at the start of
                    // the frame render. To handle this, query the texture resolver for the
                    // UV rect if it's an external texture, otherwise use the default UV rect.
                    let uv_rects = [
                        self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
                        self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
                        self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
                    ];

                    let instance = CompositeInstance::new_yuv(
                        surface_rect.cast_unit().to_f32(),
                        surface_rect.to_f32(),
                        // z-id is not relevant when updating a native compositor surface.
                        // TODO(gw): Support compositor surfaces without z-buffer, for memory / perf win here.
                        ZBufferId(0),
                        color_space,
                        format,
                        channel_bit_depth,
                        uv_rects,
                        CompositorTransform::identity(),
                    );

                    ( textures, instance )
                },
                ResolvedExternalSurfaceColorData::Rgb{ ref plane, .. } => {
                    // Bind the RGBA composite shader for this image buffer kind.
                    self.shaders
                        .borrow_mut()
                        .get_composite_shader(
                            CompositeSurfaceFormat::Rgba,
                            surface.image_buffer_kind,
                            CompositeFeatures::empty(),
                        ).bind(
                            &mut self.device,
                            &projection,
                            None,
                            &mut self.renderer_errors
                        );

                    let textures = BatchTextures::composite_rgb(plane.texture);
                    // As in the YUV path: resolve the UV rect through the
                    // texture resolver in case this is an external texture.
                    let uv_rect = self.texture_resolver.get_uv_rect(&textures.input.colors[0], plane.uv_rect);
                    let instance = CompositeInstance::new_rgb(
                        surface_rect.cast_unit().to_f32(),
                        surface_rect.to_f32(),
                        PremultipliedColorF::WHITE,
                        ZBufferId(0),
                        uv_rect,
                        CompositorTransform::identity(),
                    );

                    ( textures, instance )
                },
            };

            // Single-instance draw covering the entire surface rect.
            self.draw_instanced_batch(
                &[instance],
                VertexArrayKind::Composite,
                &textures,
                &mut results.stats,
            );

            self.compositor_config
                .compositor()
                .unwrap()
                .unbind();
        }

        self.gpu_profiler.finish_sampler(opaque_sampler);
    }
3074
    /// Draw a list of tiles to the framebuffer
    ///
    /// Instances are accumulated into a batch that is flushed whenever the
    /// next tile's textures are incompatible with the current batch, or its
    /// shader parameters (surface format, image buffer kind, features,
    /// optional texture size) differ from the ones currently bound.
    fn draw_tile_list<'a, I: Iterator<Item = &'a occlusion::Item>>(
        &mut self,
        tiles_iter: I,
        composite_state: &CompositeState,
        external_surfaces: &[ResolvedExternalSurface],
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        // Shader parameters currently bound:
        // (surface format, image buffer kind, features, optional texture size).
        let mut current_shader_params = (
            CompositeSurfaceFormat::Rgba,
            ImageBufferKind::Texture2D,
            CompositeFeatures::empty(),
            None,
        );
        let mut current_textures = BatchTextures::empty();
        let mut instances = Vec::new();

        // Bind the default composite shader up front so a valid program is
        // bound before the first draw, even if the first tile matches the
        // defaults and skips the rebind below.
        self.shaders
            .borrow_mut()
            .get_composite_shader(
                current_shader_params.0,
                current_shader_params.1,
                current_shader_params.2,
            ).bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors
            );

        for item in tiles_iter {
            let tile = &composite_state.tiles[item.key];

            let clip_rect = item.rectangle;
            let tile_rect = tile.local_rect;
            let transform = composite_state.get_device_transform(tile.transform_index).into();

            // Work out the draw params based on the tile surface
            let (instance, textures, shader_params) = match tile.surface {
                // Solid color tile: drawn via the RGB path with a dummy texture.
                CompositeTileSurface::Color { color } => {
                    let dummy = TextureSource::Dummy;
                    let image_buffer_kind = dummy.image_buffer_kind();
                    let instance = CompositeInstance::new(
                        tile_rect,
                        clip_rect,
                        color.premultiplied(),
                        tile.z_id,
                        transform,
                    );
                    let features = instance.get_rgb_features();
                    (
                        instance,
                        BatchTextures::composite_rgb(dummy),
                        (CompositeSurfaceFormat::Rgba, image_buffer_kind, features, None),
                    )
                }
                // Tile backed by a texture-cache entry: plain RGB composite.
                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::TextureCache { texture } } => {
                    let instance = CompositeInstance::new(
                        tile_rect,
                        clip_rect,
                        PremultipliedColorF::WHITE,
                        tile.z_id,
                        transform,
                    );
                    let features = instance.get_rgb_features();
                    (
                        instance,
                        BatchTextures::composite_rgb(texture),
                        (
                            CompositeSurfaceFormat::Rgba,
                            ImageBufferKind::Texture2D,
                            features,
                            None,
                        ),
                    )
                }
                // Tile backed by an external surface (e.g. video): YUV or RGB
                // depending on the resolved color data.
                CompositeTileSurface::ExternalSurface { external_surface_index } => {
                    let surface = &external_surfaces[external_surface_index.0];

                    match surface.color_data {
                        ResolvedExternalSurfaceColorData::Yuv{ ref planes, color_space, format, channel_bit_depth, .. } => {
                            let textures = BatchTextures::composite_yuv(
                                planes[0].texture,
                                planes[1].texture,
                                planes[2].texture,
                            );

                            // When the texture is an external texture, the UV rect is not known when
                            // the external surface descriptor is created, because external textures
                            // are not resolved until the lock() callback is invoked at the start of
                            // the frame render. To handle this, query the texture resolver for the
                            // UV rect if it's an external texture, otherwise use the default UV rect.
                            let uv_rects = [
                                self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
                                self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
                                self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
                            ];

                            (
                                CompositeInstance::new_yuv(
                                    tile_rect,
                                    clip_rect,
                                    tile.z_id,
                                    color_space,
                                    format,
                                    channel_bit_depth,
                                    uv_rects,
                                    transform,
                                ),
                                textures,
                                (
                                    CompositeSurfaceFormat::Yuv,
                                    surface.image_buffer_kind,
                                    CompositeFeatures::empty(),
                                    None
                                ),
                            )
                        },
                        ResolvedExternalSurfaceColorData::Rgb { ref plane, .. } => {
                            let uv_rect = self.texture_resolver.get_uv_rect(&plane.texture, plane.uv_rect);
                            let instance = CompositeInstance::new_rgb(
                                tile_rect,
                                clip_rect,
                                PremultipliedColorF::WHITE,
                                tile.z_id,
                                uv_rect,
                                transform,
                            );
                            let features = instance.get_rgb_features();
                            (
                                instance,
                                BatchTextures::composite_rgb(plane.texture),
                                (
                                    CompositeSurfaceFormat::Rgba,
                                    surface.image_buffer_kind,
                                    features,
                                    // External RGB is the only path that feeds
                                    // an explicit texture size to the shader.
                                    Some(self.texture_resolver.get_texture_size(&plane.texture).to_f32()),
                                ),
                            )
                        },
                    }
                }
                // Clear tile: emitted as black; the caller is responsible for
                // setting up the blend state before invoking this method.
                CompositeTileSurface::Clear => {
                    let dummy = TextureSource::Dummy;
                    let image_buffer_kind = dummy.image_buffer_kind();
                    let instance = CompositeInstance::new(
                        tile_rect,
                        clip_rect,
                        PremultipliedColorF::BLACK,
                        tile.z_id,
                        transform,
                    );
                    let features = instance.get_rgb_features();
                    (
                        instance,
                        BatchTextures::composite_rgb(dummy),
                        (CompositeSurfaceFormat::Rgba, image_buffer_kind, features, None),
                    )
                }
                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { .. } } => {
                    unreachable!("bug: found native surface in simple composite path");
                }
            };

            // Flush batch if shader params or textures changed
            let flush_batch = !current_textures.is_compatible_with(&textures) ||
                shader_params != current_shader_params;

            if flush_batch {
                if !instances.is_empty() {
                    self.draw_instanced_batch(
                        &instances,
                        VertexArrayKind::Composite,
                        &current_textures,
                        stats,
                    );
                    instances.clear();
                }
            }

            // Rebind the composite shader only when the params actually change.
            if shader_params != current_shader_params {
                self.shaders
                    .borrow_mut()
                    .get_composite_shader(shader_params.0, shader_params.1, shader_params.2)
                    .bind(
                        &mut self.device,
                        projection,
                        shader_params.3,
                        &mut self.renderer_errors
                    );

                current_shader_params = shader_params;
            }

            current_textures = textures;

            // Add instance to current batch
            instances.push(instance);
        }

        // Flush the last batch
        if !instances.is_empty() {
            self.draw_instanced_batch(
                &instances,
                VertexArrayKind::Composite,
                &current_textures,
                stats,
            );
        }
    }
3286
    /// Composite picture cache tiles into the framebuffer. This is currently
    /// the only way that picture cache tiles get drawn. In future, the tiles
    /// will often be handed to the OS compositor, and this method will be
    /// rarely used.
    ///
    /// Builds a front-to-back occlusion structure over the tiles, clears the
    /// (possibly partially-presented) framebuffer, then draws opaque tiles,
    /// alpha tiles, and finally any "clear" tiles on top.
    fn composite_simple(
        &mut self,
        composite_state: &CompositeState,
        draw_target: DrawTarget,
        projection: &default::Transform3D<f32>,
        results: &mut RenderResults,
        partial_present_mode: Option<PartialPresentMode>,
    ) {
        let _gm = self.gpu_profiler.start_marker("framebuffer");
        let _timer = self.gpu_profiler.start_timer(GPU_TAG_COMPOSITE);

        self.device.bind_draw_target(draw_target);
        self.device.disable_depth_write();
        self.device.disable_depth();

        // If using KHR_partial_update, call eglSetDamageRegion.
        // This must be called exactly once per frame, and prior to any rendering to the main
        // framebuffer. Additionally, on Mali-G77 we encountered rendering issues when calling
        // this earlier in the frame, during offscreen render passes. So call it now, immediately
        // before rendering to the main framebuffer. See bug 1685276 for details.
        if let Some(partial_present) = self.compositor_config.partial_present() {
            if let Some(PartialPresentMode::Single { dirty_rect }) = partial_present_mode {
                partial_present.set_buffer_damage_region(&[dirty_rect.to_i32()]);
            }
        }

        let cap = composite_state.tiles.len();

        let mut occlusion = occlusion::FrontToBackBuilder::with_capacity(cap, cap);
        let mut clear_tiles = Vec::new();

        // Classify each tile: compute its clipped device rect and feed it to
        // either the occlusion builder or the deferred clear-tile list.
        for (idx, tile) in composite_state.tiles.iter().enumerate() {
            // Clear tiles overwrite whatever is under them, so they are treated as opaque.
            let is_opaque = tile.kind != TileKind::Alpha;

            let device_tile_box = composite_state.get_device_rect(
                &tile.local_rect,
                tile.transform_index
            );

            // Determine a clip rect to apply to this tile, depending on what
            // the partial present mode is.
            let partial_clip_rect = match partial_present_mode {
                Some(PartialPresentMode::Single { dirty_rect }) => dirty_rect,
                None => device_tile_box,
            };

            // Simple compositor needs the valid rect in device space to match clip rect
            let device_valid_rect = composite_state
                .get_device_rect(&tile.local_valid_rect, tile.transform_index);

            let rect = device_tile_box
                .intersection_unchecked(&tile.device_clip_rect)
                .intersection_unchecked(&partial_clip_rect)
                .intersection_unchecked(&device_valid_rect);

            // Fully clipped out - skip the tile entirely.
            if rect.is_empty() {
                continue;
            }

            if tile.kind == TileKind::Clear {
                // Clear tiles are specific to how we render the window buttons on
                // Windows 8. We can get away with drawing them at the end on top
                // of everything else, which we do to avoid having to juggle with
                // the blend state.
                clear_tiles.push(occlusion::Item { rectangle: rect, key: idx });
                continue;
            }

            occlusion.add(&rect, is_opaque, idx);
        }

        // Clear the framebuffer
        let clear_color = Some(self.clear_color.to_array());

        match partial_present_mode {
            Some(PartialPresentMode::Single { dirty_rect }) => {
                // There is no need to clear if the dirty rect is occluded. Additionally,
                // on Mali-G77 we have observed artefacts when calling glClear (even with
                // the empty scissor rect set) after calling eglSetDamageRegion with an
                // empty damage region. So avoid clearing in that case. See bug 1709548.
                if !dirty_rect.is_empty() && occlusion.test(&dirty_rect) {
                    // We have a single dirty rect, so clear only that
                    self.device.clear_target(clear_color,
                                             None,
                                             Some(draw_target.to_framebuffer_rect(dirty_rect.to_i32())));
                }
            }
            None => {
                // Partial present is disabled, so clear the entire framebuffer
                self.device.clear_target(clear_color,
                                         None,
                                         None);
            }
        }

        // We are only interested in tiles backed with actual cached pixels so we don't
        // count clear tiles here.
        let num_tiles = composite_state.tiles
            .iter()
            .filter(|tile| tile.kind != TileKind::Clear).count();
        self.profile.set(profiler::PICTURE_TILES, num_tiles);

        // Opaque tiles first, with blending disabled.
        if !occlusion.opaque_items().is_empty() {
            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
            self.set_blend(false, FramebufferKind::Main);
            self.draw_tile_list(
                occlusion.opaque_items().iter(),
                &composite_state,
                &composite_state.external_surfaces,
                projection,
                &mut results.stats,
            );
            self.gpu_profiler.finish_sampler(opaque_sampler);
        }

        // Draw alpha tiles
        if !occlusion.alpha_items().is_empty() {
            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
            self.set_blend(true, FramebufferKind::Main);
            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Main);
            self.draw_tile_list(
                // Reverse the front-to-back item order so alpha tiles blend
                // back-to-front.
                occlusion.alpha_items().iter().rev(),
                &composite_state,
                &composite_state.external_surfaces,
                projection,
                &mut results.stats,
            );
            self.gpu_profiler.finish_sampler(transparent_sampler);
        }

        // Finally, punch out the deferred clear tiles (see comment above).
        if !clear_tiles.is_empty() {
            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
            self.set_blend(true, FramebufferKind::Main);
            self.device.set_blend_mode_premultiplied_dest_out();
            self.draw_tile_list(
                clear_tiles.iter(),
                &composite_state,
                &composite_state.external_surfaces,
                projection,
                &mut results.stats,
            );
            self.gpu_profiler.finish_sampler(transparent_sampler);
        }
    }
3436
    /// Draw a color render target (either an off-screen texture or the
    /// default framebuffer).
    ///
    /// Clears color/depth as requested, then processes the target's work in
    /// order: texture-cache blits, blur passes, scalings, SVG filters, and
    /// finally each alpha batch container.
    fn draw_color_target(
        &mut self,
        draw_target: DrawTarget,
        target: &ColorRenderTarget,
        clear_color: Option<[f32; 4]>,
        clear_depth: Option<f32>,
        render_tasks: &RenderTaskGraph,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        profile_scope!("draw_color_target");

        self.profile.inc(profiler::COLOR_PASSES);
        let _gm = self.gpu_profiler.start_marker("color target");

        // sanity check for the depth buffer
        if let DrawTarget::Texture { with_depth, .. } = draw_target {
            assert!(with_depth >= target.needs_depth());
        }

        let framebuffer_kind = if draw_target.is_default() {
            FramebufferKind::Main
        } else {
            FramebufferKind::Other
        };

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
            self.device.bind_draw_target(draw_target);
            self.device.disable_depth();
            self.set_blend(false, framebuffer_kind);

            // Depth writes must be enabled for the depth clear to take effect;
            // they are disabled again right after the clear below.
            if clear_depth.is_some() {
                self.device.enable_depth_write();
            }

            // Pick a scissor rect for the clear, depending on the target kind.
            let clear_rect = match draw_target {
                DrawTarget::NativeSurface { .. } => {
                    unreachable!("bug: native compositor surface in child target");
                }
                DrawTarget::Default { rect, total_size, .. } if rect.min == FramebufferIntPoint::zero() && rect.size() == total_size => {
                    // whole screen is covered, no need for scissor
                    None
                }
                DrawTarget::Default { rect, .. } => {
                    Some(rect)
                }
                DrawTarget::Texture { .. } if self.enable_clear_scissor => {
                    // TODO(gw): Applying a scissor rect and minimal clear here
                    // is a very large performance win on the Intel and nVidia
                    // GPUs that I have tested with. It's possible it may be a
                    // performance penalty on other GPU types - we should test this
                    // and consider different code paths.
                    //
                    // Note: The above measurements were taken when render
                    // target slices were minimum 2048x2048. Now that we size
                    // them adaptively, this may be less of a win (except perhaps
                    // on a mostly-unused last slice of a large texture array).
                    Some(draw_target.to_framebuffer_rect(target.used_rect))
                }
                DrawTarget::Texture { .. } | DrawTarget::External { .. } => {
                    None
                }
            };

            self.device.clear_target(
                clear_color,
                clear_depth,
                clear_rect,
            );

            if clear_depth.is_some() {
                self.device.disable_depth_write();
            }
        }

        // Handle any blits from the texture cache to this target.
        self.handle_blits(
            &target.blits,
            render_tasks,
            draw_target,
        );

        // Draw any blurs for this target.
        // Blurs are rendered as a standard 2-pass
        // separable implementation.
        // TODO(gw): In the future, consider having
        //           fast path blur shaders for common
        //           blur radii with fixed weights.
        if !target.vertical_blurs.is_empty() || !target.horizontal_blurs.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLUR);

            self.set_blend(false, framebuffer_kind);
            // One shader bind covers both blur directions below.
            self.shaders.borrow_mut().cs_blur_rgba8
                .bind(&mut self.device, projection, None, &mut self.renderer_errors);

            if !target.vertical_blurs.is_empty() {
                self.draw_blurs(
                    &target.vertical_blurs,
                    stats,
                );
            }

            if !target.horizontal_blurs.is_empty() {
                self.draw_blurs(
                    &target.horizontal_blurs,
                    stats,
                );
            }
        }

        self.handle_scaling(
            &target.scalings,
            projection,
            stats,
        );

        for (ref textures, ref filters) in &target.svg_filters {
            self.handle_svg_filters(
                textures,
                filters,
                projection,
                stats,
            );
        }

        for alpha_batch_container in &target.alpha_batch_containers {
            self.draw_alpha_batch_container(
                alpha_batch_container,
                draw_target,
                framebuffer_kind,
                projection,
                render_tasks,
                stats,
            );
        }

        // Depth contents are no longer needed once the batches are drawn;
        // invalidating lets the driver discard them.
        if clear_depth.is_some() {
            self.device.invalidate_depth_target();
        }
    }
3578
3579    fn draw_blurs(
3580        &mut self,
3581        blurs: &FastHashMap<TextureSource, Vec<BlurInstance>>,
3582        stats: &mut RendererStats,
3583    ) {
3584        for (texture, blurs) in blurs {
3585            let textures = BatchTextures::composite_rgb(
3586                *texture,
3587            );
3588
3589            self.draw_instanced_batch(
3590                blurs,
3591                VertexArrayKind::Blur,
3592                &textures,
3593                stats,
3594            );
3595        }
3596    }
3597
    /// Draw all the instances in a clip batcher list to the current target.
    ///
    /// Draw order is: rounded rectangles (slow path, then fast path),
    /// box-shadow clips, then image masks. The caller is responsible for
    /// having set up the blend state (overwrite for primary clips,
    /// multiply for secondary clips) before calling this.
    fn draw_clip_batch_list(
        &mut self,
        list: &ClipBatchList,
        draw_target: &DrawTarget,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        // Debug option: skip rendering of all clip masks entirely.
        if self.debug_flags.contains(DebugFlags::DISABLE_CLIP_MASKS) {
            return;
        }

        // draw rounded cornered rectangles
        if !list.slow_rectangles.is_empty() {
            let _gm2 = self.gpu_profiler.start_marker("slow clip rectangles");
            self.shaders.borrow_mut().cs_clip_rectangle_slow.bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
            );
            self.draw_instanced_batch(
                &list.slow_rectangles,
                VertexArrayKind::ClipRect,
                &BatchTextures::empty(),
                stats,
            );
        }
        if !list.fast_rectangles.is_empty() {
            let _gm2 = self.gpu_profiler.start_marker("fast clip rectangles");
            self.shaders.borrow_mut().cs_clip_rectangle_fast.bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
            );
            self.draw_instanced_batch(
                &list.fast_rectangles,
                VertexArrayKind::ClipRect,
                &BatchTextures::empty(),
                stats,
            );
        }

        // draw box-shadow clips, batched per mask texture
        for (mask_texture_id, items) in list.box_shadows.iter() {
            let _gm2 = self.gpu_profiler.start_marker("box-shadows");
            let textures = BatchTextures::composite_rgb(*mask_texture_id);
            self.shaders.borrow_mut().cs_clip_box_shadow
                .bind(&mut self.device, projection, None, &mut self.renderer_errors);
            self.draw_instanced_batch(
                items,
                VertexArrayKind::ClipBoxShadow,
                &textures,
                stats,
            );
        }

        // draw image masks
        let mut using_scissor = false;
        for ((mask_texture_id, clip_rect), items) in list.images.iter() {
            let _gm2 = self.gpu_profiler.start_marker("clip images");
            // Some image masks may require scissoring to ensure they don't draw
            // outside their task's target bounds. Axis-aligned primitives will
            // be clamped inside the shader and should not require scissoring.
            // TODO: We currently assume scissor state is off by default for
            // alpha targets here, but in the future we may want to track the
            // current scissor state so that this can be properly saved and
            // restored here.
            if let Some(clip_rect) = clip_rect {
                // Lazily enable the scissor test only for batches that need it.
                if !using_scissor {
                    self.device.enable_scissor();
                    using_scissor = true;
                }
                let scissor_rect = draw_target.build_scissor_rect(Some(*clip_rect));
                self.device.set_scissor_rect(scissor_rect);
            } else if using_scissor {
                self.device.disable_scissor();
                using_scissor = false;
            }
            let textures = BatchTextures::composite_rgb(*mask_texture_id);
            self.shaders.borrow_mut().cs_clip_image
                .bind(&mut self.device, projection, None, &mut self.renderer_errors);
            self.draw_instanced_batch(
                items,
                VertexArrayKind::ClipImage,
                &textures,
                stats,
            );
        }
        // Restore the default (disabled) scissor state before returning.
        if using_scissor {
            self.device.disable_scissor();
        }
    }
3692
    /// Draw one alpha (A8) render target for a pass of the render task
    /// graph: clear task regions, render blurs and scalings, then rasterize
    /// the clip masks into the target.
    fn draw_alpha_target(
        &mut self,
        draw_target: DrawTarget,
        target: &AlphaRenderTarget,
        projection: &default::Transform3D<f32>,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        profile_scope!("draw_alpha_target");

        self.profile.inc(profiler::ALPHA_PASSES);
        let _gm = self.gpu_profiler.start_marker("alpha target");
        let alpha_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_ALPHA);

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
            self.device.bind_draw_target(draw_target);
            self.device.disable_depth();
            self.device.disable_depth_write();
            self.set_blend(false, FramebufferKind::Other);

            // Task regions are cleared to 0 or 1 depending on how the mask
            // for that task is accumulated.
            let zero_color = [0.0, 0.0, 0.0, 0.0];
            let one_color = [1.0, 1.0, 1.0, 1.0];

            // On some Mali-T devices we have observed crashes in subsequent draw calls
            // immediately after clearing the alpha render target regions with glClear().
            // Using the shader to clear the regions avoids the crash. See bug 1638593.
            if self.clear_alpha_targets_with_quads
                && !(target.zero_clears.is_empty() && target.one_clears.is_empty())
            {
                // Build one quad instance per task rect that needs clearing.
                let zeroes = target.zero_clears
                    .iter()
                    .map(|task_id| {
                        let rect = render_tasks[*task_id].get_target_rect().to_f32();
                        ClearInstance {
                            rect: [
                                rect.min.x, rect.min.y,
                                rect.max.x, rect.max.y,
                            ],
                            color: zero_color,
                        }
                    });

                let ones = target.one_clears
                    .iter()
                    .map(|task_id| {
                        let rect = render_tasks[*task_id].get_target_rect().to_f32();
                        ClearInstance {
                            rect: [
                                rect.min.x, rect.min.y,
                                rect.max.x, rect.max.y,
                            ],
                            color: one_color,
                        }
                    });

                let instances = zeroes.chain(ones).collect::<Vec<_>>();
                self.shaders.borrow_mut().ps_clear.bind(
                    &mut self.device,
                    &projection,
                    None,
                    &mut self.renderer_errors,
                );
                self.draw_instanced_batch(
                    &instances,
                    VertexArrayKind::Clear,
                    &BatchTextures::empty(),
                    stats,
                );
            } else {
                // TODO(gw): Applying a scissor rect and minimal clear here
                // is a very large performance win on the Intel and nVidia
                // GPUs that I have tested with. It's possible it may be a
                // performance penalty on other GPU types - we should test this
                // and consider different code paths.
                for &task_id in &target.zero_clears {
                    let rect = render_tasks[task_id].get_target_rect();
                    self.device.clear_target(
                        Some(zero_color),
                        None,
                        Some(draw_target.to_framebuffer_rect(rect)),
                    );
                }

                for &task_id in &target.one_clears {
                    let rect = render_tasks[task_id].get_target_rect();
                    self.device.clear_target(
                        Some(one_color),
                        None,
                        Some(draw_target.to_framebuffer_rect(rect)),
                    );
                }
            }
        }

        // Draw any blurs for this target.
        // Blurs are rendered as a standard 2-pass
        // separable implementation.
        // TODO(gw): In the future, consider having
        //           fast path blur shaders for common
        //           blur radii with fixed weights.
        if !target.vertical_blurs.is_empty() || !target.horizontal_blurs.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLUR);

            // Bind the A8 blur shader once; draw_blurs only changes textures.
            self.shaders.borrow_mut().cs_blur_a8
                .bind(&mut self.device, projection, None, &mut self.renderer_errors);

            if !target.vertical_blurs.is_empty() {
                self.draw_blurs(
                    &target.vertical_blurs,
                    stats,
                );
            }

            if !target.horizontal_blurs.is_empty() {
                self.draw_blurs(
                    &target.horizontal_blurs,
                    stats,
                );
            }
        }

        self.handle_scaling(
            &target.scalings,
            projection,
            stats,
        );

        // Draw the clip items into the tiled alpha mask.
        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_CLIP);

            // TODO(gw): Consider grouping multiple clip masks per shader
            //           invocation here to reduce memory bandwidth further?

            // Draw the primary clip mask - since this is the first mask
            // for the task, we can disable blending, knowing that it will
            // overwrite every pixel in the mask area.
            self.set_blend(false, FramebufferKind::Other);
            self.draw_clip_batch_list(
                &target.clip_batcher.primary_clips,
                &draw_target,
                projection,
                stats,
            );

            // switch to multiplicative blending for secondary masks, using
            // multiplicative blending to accumulate clips into the mask.
            self.set_blend(true, FramebufferKind::Other);
            self.set_blend_mode_multiply(FramebufferKind::Other);
            self.draw_clip_batch_list(
                &target.clip_batcher.secondary_clips,
                &draw_target,
                projection,
                stats,
            );
        }

        self.gpu_profiler.finish_sampler(alpha_sampler);
    }
3853
    /// Draw directly into a texture-cache texture: clears and blits first,
    /// then any borders, line decorations, gradients and blurs that target
    /// this cache texture.
    fn draw_texture_cache_target(
        &mut self,
        texture: &CacheTextureId,
        target: &TextureCacheRenderTarget,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        profile_scope!("draw_texture_cache_target");

        self.device.disable_depth();
        self.device.disable_depth_write();

        self.set_blend(false, FramebufferKind::Other);

        let texture = &self.texture_resolver.texture_cache_map[texture].texture;
        let target_size = texture.get_dimensions();

        // Build an orthographic projection covering the whole cache texture.
        let projection = Transform3D::ortho(
            0.0,
            target_size.width as f32,
            0.0,
            target_size.height as f32,
            self.device.ortho_near_plane(),
            self.device.ortho_far_plane(),
        );

        let draw_target = DrawTarget::from_texture(
            texture,
            false,
        );
        self.device.bind_draw_target(draw_target);

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CLEAR);

            self.device.disable_depth();
            self.device.disable_depth_write();
            self.set_blend(false, FramebufferKind::Other);

            let color = [0.0, 0.0, 0.0, 0.0];
            // Clear with quads when glClear is known-problematic on the
            // device (mirrors the alpha-target workaround).
            if self.clear_caches_with_quads && !target.clears.is_empty() {
                let instances = target.clears
                    .iter()
                    .map(|r| ClearInstance {
                        rect: [
                            r.min.x as f32, r.min.y as f32,
                            r.max.x as f32, r.max.y as f32,
                        ],
                        color,
                    })
                    .collect::<Vec<_>>();
                self.shaders.borrow_mut().ps_clear.bind(
                    &mut self.device,
                    &projection,
                    None,
                    &mut self.renderer_errors,
                );
                self.draw_instanced_batch(
                    &instances,
                    VertexArrayKind::Clear,
                    &BatchTextures::empty(),
                    stats,
                );
            } else {
                for rect in &target.clears {
                    self.device.clear_target(
                        Some(color),
                        None,
                        Some(draw_target.to_framebuffer_rect(*rect)),
                    );
                }
            }

            // Handle any blits to this texture from child tasks.
            self.handle_blits(
                &target.blits,
                render_tasks,
                draw_target,
            );
        }

        // Draw any borders for this target.
        if !target.border_segments_solid.is_empty() ||
           !target.border_segments_complex.is_empty()
        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_BORDER);

            self.set_blend(true, FramebufferKind::Other);
            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);

            if !target.border_segments_solid.is_empty() {
                self.shaders.borrow_mut().cs_border_solid.bind(
                    &mut self.device,
                    &projection,
                    None,
                    &mut self.renderer_errors,
                );

                self.draw_instanced_batch(
                    &target.border_segments_solid,
                    VertexArrayKind::Border,
                    &BatchTextures::empty(),
                    stats,
                );
            }

            if !target.border_segments_complex.is_empty() {
                self.shaders.borrow_mut().cs_border_segment.bind(
                    &mut self.device,
                    &projection,
                    None,
                    &mut self.renderer_errors,
                );

                self.draw_instanced_batch(
                    &target.border_segments_complex,
                    VertexArrayKind::Border,
                    &BatchTextures::empty(),
                    stats,
                );
            }

            self.set_blend(false, FramebufferKind::Other);
        }

        // Draw any line decorations for this target.
        if !target.line_decorations.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINE_DECORATION);

            self.set_blend(true, FramebufferKind::Other);
            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);

            self.shaders.borrow_mut().cs_line_decoration.bind(
                &mut self.device,
                &projection,
                None,
                &mut self.renderer_errors,
            );

            self.draw_instanced_batch(
                &target.line_decorations,
                VertexArrayKind::LineDecoration,
                &BatchTextures::empty(),
                stats,
            );

            self.set_blend(false, FramebufferKind::Other);
        }

        // Draw any fast path linear gradients for this target.
        if !target.fast_linear_gradients.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_FAST_LINEAR_GRADIENT);

            self.set_blend(false, FramebufferKind::Other);

            self.shaders.borrow_mut().cs_fast_linear_gradient.bind(
                &mut self.device,
                &projection,
                None,
                &mut self.renderer_errors,
            );

            self.draw_instanced_batch(
                &target.fast_linear_gradients,
                VertexArrayKind::FastLinearGradient,
                &BatchTextures::empty(),
                stats,
            );
        }

        // Draw any linear gradients for this target.
        if !target.linear_gradients.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINEAR_GRADIENT);

            self.set_blend(false, FramebufferKind::Other);

            self.shaders.borrow_mut().cs_linear_gradient.bind(
                &mut self.device,
                &projection,
                None,
                &mut self.renderer_errors,
            );

            // Bind the dither matrix if dithering is enabled for this renderer.
            if let Some(ref texture) = self.dither_matrix_texture {
                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
            }

            self.draw_instanced_batch(
                &target.linear_gradients,
                VertexArrayKind::LinearGradient,
                &BatchTextures::empty(),
                stats,
            );
        }

        // Draw any radial gradients for this target.
        if !target.radial_gradients.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_RADIAL_GRADIENT);

            self.set_blend(false, FramebufferKind::Other);

            self.shaders.borrow_mut().cs_radial_gradient.bind(
                &mut self.device,
                &projection,
                None,
                &mut self.renderer_errors,
            );

            if let Some(ref texture) = self.dither_matrix_texture {
                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
            }

            self.draw_instanced_batch(
                &target.radial_gradients,
                VertexArrayKind::RadialGradient,
                &BatchTextures::empty(),
                stats,
            );
        }

        // Draw any conic gradients for this target.
        if !target.conic_gradients.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_CONIC_GRADIENT);

            self.set_blend(false, FramebufferKind::Other);

            self.shaders.borrow_mut().cs_conic_gradient.bind(
                &mut self.device,
                &projection,
                None,
                &mut self.renderer_errors,
            );

            if let Some(ref texture) = self.dither_matrix_texture {
                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
            }

            self.draw_instanced_batch(
                &target.conic_gradients,
                VertexArrayKind::ConicGradient,
                &BatchTextures::empty(),
                stats,
            );
        }

        // Draw any blurs for this target.
        if !target.horizontal_blurs.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLUR);

            {
                // Pick the blur shader matching this target's channel format.
                // (Scoped so the shaders RefCell borrow ends before draw_blurs
                // needs to borrow it again.)
                let mut shaders = self.shaders.borrow_mut();
                match target.target_kind {
                    RenderTargetKind::Alpha => &mut shaders.cs_blur_a8,
                    RenderTargetKind::Color => &mut shaders.cs_blur_rgba8,
                }.bind(&mut self.device, &projection, None, &mut self.renderer_errors);
            }

            self.draw_blurs(
                &target.horizontal_blurs,
                stats,
            );
        }
    }
4117
    /// Lock each pending external image via the registered handler and
    /// patch its UV rect into the GPU cache.
    ///
    /// Returns `None` when there are no deferred resolves, otherwise a
    /// `GpuCacheUpdateList` with one copy-update per resolved image.
    /// Locked images are later released by `unlock_external_images`.
    fn update_deferred_resolves(&mut self, deferred_resolves: &[DeferredResolve]) -> Option<GpuCacheUpdateList> {
        // The first thing we do is run through any pending deferred
        // resolves, and use a callback to get the UV rect for this
        // custom item. Then we patch the resource_rects structure
        // here before it's uploaded to the GPU.
        if deferred_resolves.is_empty() {
            return None;
        }

        let handler = self.external_image_handler
            .as_mut()
            .expect("Found external image, but no handler set!");

        let mut list = GpuCacheUpdateList {
            frame_id: FrameId::INVALID,
            clear: false,
            height: self.gpu_cache_texture.get_height(),
            blocks: Vec::new(),
            updates: Vec::new(),
            debug_commands: Vec::new(),
        };

        for (i, deferred_resolve) in deferred_resolves.iter().enumerate() {
            self.gpu_profiler.place_marker("deferred resolve");
            let props = &deferred_resolve.image_properties;
            let ext_image = props
                .external_image
                .expect("BUG: Deferred resolves must be external images!");
            // Provide rendering information for NativeTexture external images.
            let image = handler.lock(ext_image.id, ext_image.channel_index, deferred_resolve.rendering);
            let texture_target = match ext_image.image_type {
                ExternalImageType::TextureHandle(target) => target,
                ExternalImageType::Buffer => {
                    panic!("not a suitable image type in update_deferred_resolves()");
                }
            };

            // In order to produce the handle, the external image handler may call into
            // the GL context and change some states.
            self.device.reset_state();

            let texture = match image.source {
                ExternalImageSource::NativeTexture(texture_id) => {
                    ExternalTexture::new(
                        texture_id,
                        texture_target,
                        Swizzle::default(),
                        image.uv,
                    )
                }
                ExternalImageSource::Invalid => {
                    warn!("Invalid ext-image");
                    debug!(
                        "For ext_id:{:?}, channel:{}.",
                        ext_image.id,
                        ext_image.channel_index
                    );
                    // Just use 0 as the gl handle for this failed case.
                    ExternalTexture::new(
                        0,
                        texture_target,
                        Swizzle::default(),
                        image.uv,
                    )
                }
                ExternalImageSource::RawData(_) => {
                    panic!("Raw external data is not expected for deferred resolves!");
                }
            };

            // Record the texture keyed by its position in the resolve list,
            // so unlock_external_images can map back to the image properties.
            self.texture_resolver
                .external_images
                .insert(DeferredResolveIndex(i as u32), texture);

            // Patch the image's UV rect into the GPU cache at its address.
            list.updates.push(GpuCacheUpdate::Copy {
                block_index: list.blocks.len(),
                block_count: BLOCKS_PER_UV_RECT,
                address: deferred_resolve.address,
            });
            list.blocks.push(image.uv.into());
            list.blocks.push([0f32; 4].into());
        }

        Some(list)
    }
4203
4204    fn unlock_external_images(
4205        &mut self,
4206        deferred_resolves: &[DeferredResolve],
4207    ) {
4208        if !self.texture_resolver.external_images.is_empty() {
4209            let handler = self.external_image_handler
4210                .as_mut()
4211                .expect("Found external image, but no handler set!");
4212
4213            for (index, _) in self.texture_resolver.external_images.drain() {
4214                let props = &deferred_resolves[index.0 as usize].image_properties;
4215                let ext_image = props
4216                    .external_image
4217                    .expect("BUG: Deferred resolves must be external images!");
4218                handler.unlock(ext_image.id, ext_image.channel_index);
4219            }
4220        }
4221    }
4222
    /// Update the dirty rects based on current compositing mode and config.
    ///
    /// Returns `Some(PartialPresentMode)` when partial present can be used
    /// this frame, and populates `results.dirty_rects` with the region(s)
    /// the client needs to present (empty when nothing changed).
    // TODO(gw): This can be tidied up significantly once the Draw compositor
    //           is implemented in terms of the compositor trait.
    fn calculate_dirty_rects(
        &mut self,
        buffer_age: usize,
        composite_state: &CompositeState,
        draw_target_dimensions: DeviceIntSize,
        results: &mut RenderResults,
    ) -> Option<PartialPresentMode> {
        let mut partial_present_mode = None;

        let (max_partial_present_rects, draw_previous_partial_present_regions) = match self.current_compositor_kind {
            CompositorKind::Native { .. } => {
                // Assume that we can return a single dirty rect for native
                // compositor for now, and that there is no buffer-age functionality.
                // These params can be exposed by the compositor capabilities struct
                // as the Draw compositor is ported to use it.
                (1, false)
            }
            CompositorKind::Draw { draw_previous_partial_present_regions, max_partial_present_rects } => {
                (max_partial_present_rects, draw_previous_partial_present_regions)
            }
        };

        if max_partial_present_rects > 0 {
            // Damage accumulated from previous frames; falls back to the
            // full target when the tracker has no rect for this buffer age.
            let prev_frames_damage_rect = if let Some(..) = self.compositor_config.partial_present() {
                self.buffer_damage_tracker
                    .get_damage_rect(buffer_age)
                    .or_else(|| Some(DeviceRect::from_size(draw_target_dimensions.to_f32())))
            } else {
                None
            };

            let can_use_partial_present =
                composite_state.dirty_rects_are_valid &&
                !self.force_redraw &&
                !(prev_frames_damage_rect.is_none() && draw_previous_partial_present_regions) &&
                !self.debug_overlay_state.is_enabled;

            if can_use_partial_present {
                let mut combined_dirty_rect = DeviceRect::zero();

                // Work out how many dirty rects WR produced, and if that's more than
                // what the device supports.
                for tile in &composite_state.tiles {
                    if tile.kind == TileKind::Clear {
                        continue;
                    }
                    let dirty_rect = composite_state.get_device_rect(
                        &tile.local_dirty_rect,
                        tile.transform_index,
                    );
                    combined_dirty_rect = combined_dirty_rect.union(&dirty_rect);
                }

                let combined_dirty_rect = combined_dirty_rect.round();
                let combined_dirty_rect_i32 = combined_dirty_rect.to_i32();
                // Return this frame's dirty region. If nothing has changed, don't return any dirty
                // rects at all (the client can use this as a signal to skip present completely).
                if !combined_dirty_rect.is_empty() {
                    results.dirty_rects.push(combined_dirty_rect_i32);
                }

                // Track this frame's dirty region, for calculating subsequent frames' damage.
                if draw_previous_partial_present_regions {
                    self.buffer_damage_tracker.push_dirty_rect(&combined_dirty_rect);
                }

                // If the implementation requires manually keeping the buffer consistent,
                // then we must combine this frame's dirty region with that of previous frames
                // to determine the total_dirty_rect. This is used to determine what region we
                // render to, and is what we send to the compositor as the buffer damage region
                // (eg for KHR_partial_update).
                // NOTE: the unwrap is safe because can_use_partial_present guarantees
                // prev_frames_damage_rect is Some when
                // draw_previous_partial_present_regions is true.
                let total_dirty_rect = if draw_previous_partial_present_regions {
                    combined_dirty_rect.union(&prev_frames_damage_rect.unwrap())
                } else {
                    combined_dirty_rect
                };

                partial_present_mode = Some(PartialPresentMode::Single {
                    dirty_rect: total_dirty_rect,
                });
            } else {
                // If we don't have a valid partial present scenario, return a single
                // dirty rect to the client that covers the entire framebuffer.
                let fb_rect = DeviceIntRect::from_size(
                    draw_target_dimensions,
                );
                results.dirty_rects.push(fb_rect);

                if draw_previous_partial_present_regions {
                    self.buffer_damage_tracker.push_dirty_rect(&fb_rect.to_f32());
                }
            }

            self.force_redraw = false;
        }

        partial_present_mode
    }
4324
4325    fn bind_frame_data(&mut self, frame: &mut Frame) {
4326        profile_scope!("bind_frame_data");
4327
4328        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_DATA);
4329
4330        self.vertex_data_textures[self.current_vertex_data_textures].update(
4331            &mut self.device,
4332            &mut self.texture_upload_pbo_pool,
4333            frame,
4334        );
4335        self.current_vertex_data_textures =
4336            (self.current_vertex_data_textures + 1) % VERTEX_DATA_TEXTURE_COUNT;
4337    }
4338
4339    fn update_native_surfaces(&mut self) {
4340        profile_scope!("update_native_surfaces");
4341
4342        match self.compositor_config {
4343            CompositorConfig::Native { ref mut compositor, .. } => {
4344                for op in self.pending_native_surface_updates.drain(..) {
4345                    match op.details {
4346                        NativeSurfaceOperationDetails::CreateSurface { id, virtual_offset, tile_size, is_opaque } => {
4347                            let _inserted = self.allocated_native_surfaces.insert(id);
4348                            debug_assert!(_inserted, "bug: creating existing surface");
4349                            compositor.create_surface(
4350                                    id,
4351                                    virtual_offset,
4352                                    tile_size,
4353                                    is_opaque,
4354                            );
4355                        }
4356                        NativeSurfaceOperationDetails::CreateExternalSurface { id, is_opaque } => {
4357                            let _inserted = self.allocated_native_surfaces.insert(id);
4358                            debug_assert!(_inserted, "bug: creating existing surface");
4359                            compositor.create_external_surface(
4360                                id,
4361                                is_opaque,
4362                            );
4363                        }
4364                        NativeSurfaceOperationDetails::DestroySurface { id } => {
4365                            let _existed = self.allocated_native_surfaces.remove(&id);
4366                            debug_assert!(_existed, "bug: removing unknown surface");
4367                            compositor.destroy_surface(id);
4368                        }
4369                        NativeSurfaceOperationDetails::CreateTile { id } => {
4370                            compositor.create_tile(id);
4371                        }
4372                        NativeSurfaceOperationDetails::DestroyTile { id } => {
4373                            compositor.destroy_tile(id);
4374                        }
4375                        NativeSurfaceOperationDetails::AttachExternalImage { id, external_image } => {
4376                            compositor.attach_external_image(id, external_image);
4377                        }
4378                    }
4379                }
4380            }
4381            CompositorConfig::Draw { .. } => {
4382                // Ensure nothing is added in simple composite mode, since otherwise
4383                // memory will leak as this doesn't get drained
4384                debug_assert!(self.pending_native_surface_updates.is_empty());
4385            }
4386        }
4387    }
4388
    /// Renders every pass of a built `Frame` into its off-screen targets
    /// (texture cache, picture cache, alpha and color render targets) and
    /// finally hands off to `composite_frame`.
    ///
    /// * `device_size` - when `Some`, a composite to the main framebuffer
    ///   will occur; when `None`, only the cache/render targets are updated.
    /// * `buffer_age` - forwarded to the partial-present dirty-rect
    ///   calculation.
    /// * `results` - accumulates render statistics and dirty rects.
    ///
    /// Sets `frame.has_been_rendered` on completion; a subsequent call with
    /// the same frame skips the texture/picture cache target updates.
    fn draw_frame(
        &mut self,
        frame: &mut Frame,
        device_size: Option<DeviceIntSize>,
        buffer_age: usize,
        results: &mut RenderResults,
    ) {
        profile_scope!("draw_frame");

        // These markers seem to crash a lot on Android, see bug 1559834
        #[cfg(not(target_os = "android"))]
        let _gm = self.gpu_profiler.start_marker("draw frame");

        // An empty frame has nothing to draw; mark it rendered and bail.
        if frame.passes.is_empty() {
            frame.has_been_rendered = true;
            return;
        }

        // Reset depth/blend/stencil state to a known baseline before the
        // first pass.
        self.device.disable_depth_write();
        self.set_blend(false, FramebufferKind::Other);
        self.device.disable_stencil();

        self.bind_frame_data(frame);

        // Determine the present mode and dirty rects, if device_size
        // is Some(..). If it's None, no composite will occur and only
        // picture cache and texture cache targets will be updated.
        // TODO(gw): Split Frame so that it's clearer when a composite
        //           is occurring.
        let present_mode = device_size.and_then(|device_size| {
            self.calculate_dirty_rects(
                buffer_age,
                &frame.composite_state,
                device_size,
                results,
            )
        });

        // If we have a native OS compositor, then make use of that interface to
        // specify how to composite each of the picture cache surfaces. First, we
        // need to find each tile that may be bound and updated later in the frame
        // and invalidate it so that the native render compositor knows that these
        // tiles can't be composited early. Next, after all such tiles have been
        // invalidated, then we queue surfaces for native composition by the render
        // compositor before we actually update the tiles. This allows the render
        // compositor to start early composition while the tiles are updating.
        if let CompositorKind::Native { .. } = self.current_compositor_kind {
            let compositor = self.compositor_config.compositor().unwrap();
            // Invalidate any native surface tiles that might be updated by passes.
            if !frame.has_been_rendered {
                for tile in &frame.composite_state.tiles {
                    if tile.kind == TileKind::Clear {
                        continue;
                    }
                    if !tile.local_dirty_rect.is_empty() {
                        if let CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { id, .. } } = tile.surface {
                            let valid_rect = frame.composite_state.get_surface_rect(
                                &tile.local_valid_rect,
                                &tile.local_rect,
                                tile.transform_index,
                            ).to_i32();

                            compositor.invalidate_tile(id, valid_rect);
                        }
                    }
                }
            }
            // Ensure any external surfaces that might be used during early composition
            // are invalidated first so that the native compositor can properly schedule
            // composition to happen only when the external surface is updated.
            // See update_external_native_surfaces for more details.
            for surface in &frame.composite_state.external_surfaces {
                if let Some((native_surface_id, size)) = surface.update_params {
                    let surface_rect = size.into();
                    compositor.invalidate_tile(NativeTileId { surface_id: native_surface_id, x: 0, y: 0 }, surface_rect);
                }
            }
            // Finally queue native surfaces for early composition, if applicable. By now,
            // we have already invalidated any tiles that such surfaces may depend upon, so
            // the native render compositor can keep track of when to actually schedule
            // composition as surfaces are updated.
            if device_size.is_some() {
                frame.composite_state.composite_native(
                    self.clear_color,
                    &results.dirty_rects,
                    &mut **compositor,
                );
            }
        }

        for (_pass_index, pass) in frame.passes.iter_mut().enumerate() {
            #[cfg(not(target_os = "android"))]
            let _gm = self.gpu_profiler.start_marker(&format!("pass {}", _pass_index));

            profile_scope!("offscreen target");

            // If this frame has already been drawn, then any texture
            // cache targets have already been updated and can be
            // skipped this time.
            if !frame.has_been_rendered {
                for (&texture_id, target) in &pass.texture_cache {
                    self.draw_texture_cache_target(
                        &texture_id,
                        target,
                        &frame.render_tasks,
                        &mut results.stats,
                    );
                }

                if !pass.picture_cache.is_empty() {
                    self.profile.inc(profiler::COLOR_PASSES);
                }

                // Draw picture caching tiles for this pass.
                for picture_target in &pass.picture_cache {
                    results.stats.color_target_count += 1;

                    // Resolve the surface into a draw target: either a
                    // texture-cache texture, or a native OS surface that must
                    // be bound via the compositor for the duration of the draw.
                    let draw_target = match picture_target.surface {
                        ResolvedSurfaceTexture::TextureCache { ref texture } => {
                            let (texture, _) = self.texture_resolver
                                .resolve(texture)
                                .expect("bug");

                            DrawTarget::from_texture(
                                texture,
                                true,
                            )
                        }
                        ResolvedSurfaceTexture::Native { id, size } => {
                            let surface_info = match self.current_compositor_kind {
                                CompositorKind::Native { .. } => {
                                    let compositor = self.compositor_config.compositor().unwrap();
                                    compositor.bind(
                                        id,
                                        picture_target.dirty_rect,
                                        picture_target.valid_rect,
                                    )
                                }
                                CompositorKind::Draw { .. } => {
                                    // Native surfaces can only appear when a
                                    // native compositor is in use.
                                    unreachable!();
                                }
                            };

                            DrawTarget::NativeSurface {
                                offset: surface_info.origin,
                                external_fbo_id: surface_info.fbo_id,
                                dimensions: size,
                            }
                        }
                    };

                    let projection = Transform3D::ortho(
                        0.0,
                        draw_target.dimensions().width as f32,
                        0.0,
                        draw_target.dimensions().height as f32,
                        self.device.ortho_near_plane(),
                        self.device.ortho_far_plane(),
                    );

                    self.draw_picture_cache_target(
                        picture_target,
                        draw_target,
                        &projection,
                        &frame.render_tasks,
                        &mut results.stats,
                    );

                    // Native OS surfaces must be unbound at the end of drawing to them
                    if let ResolvedSurfaceTexture::Native { .. } = picture_target.surface {
                        match self.current_compositor_kind {
                            CompositorKind::Native { .. } => {
                                let compositor = self.compositor_config.compositor().unwrap();
                                compositor.unbind();
                            }
                            CompositorKind::Draw { .. } => {
                                unreachable!();
                            }
                        }
                    }
                }
            }

            // Draw the alpha (mask) render targets for this pass.
            for target in &pass.alpha.targets {
                results.stats.alpha_target_count += 1;

                let texture_id = target.texture_id();

                let alpha_tex = self.texture_resolver.get_cache_texture_mut(&texture_id);

                let draw_target = DrawTarget::from_texture(
                    alpha_tex,
                    false,
                );

                let projection = Transform3D::ortho(
                    0.0,
                    draw_target.dimensions().width as f32,
                    0.0,
                    draw_target.dimensions().height as f32,
                    self.device.ortho_near_plane(),
                    self.device.ortho_far_plane(),
                );

                self.draw_alpha_target(
                    draw_target,
                    target,
                    &projection,
                    &frame.render_tasks,
                    &mut results.stats,
                );
            }

            let color_rt_info = RenderTargetInfo { has_depth: pass.color.needs_depth() };

            // Draw the color render targets for this pass, reusing the
            // backing textures where possible.
            for target in &pass.color.targets {
                results.stats.color_target_count += 1;

                let texture_id = target.texture_id();

                let color_tex = self.texture_resolver.get_cache_texture_mut(&texture_id);

                self.device.reuse_render_target::<u8>(
                    color_tex,
                    color_rt_info,
                );

                let draw_target = DrawTarget::from_texture(
                    color_tex,
                    target.needs_depth(),
                );

                let projection = Transform3D::ortho(
                    0.0,
                    draw_target.dimensions().width as f32,
                    0.0,
                    draw_target.dimensions().height as f32,
                    self.device.ortho_near_plane(),
                    self.device.ortho_far_plane(),
                );

                // Depth targets are cleared to the far plane (1.0).
                let clear_depth = if target.needs_depth() {
                    Some(1.0)
                } else {
                    None
                };

                self.draw_color_target(
                    draw_target,
                    target,
                    Some([0.0, 0.0, 0.0, 0.0]),
                    clear_depth,
                    &frame.render_tasks,
                    &projection,
                    &mut results.stats,
                );
            }

            // Only end the pass here and invalidate previous textures for
            // off-screen targets. Deferring return of the inputs to the
            // frame buffer until the implicit end_pass in end_frame allows
            // debug draw overlays to be added without triggering a copy
            // resolve stage in mobile / tiled GPUs.
            self.texture_resolver.end_pass(
                &mut self.device,
                &pass.textures_to_invalidate,
            );
            {
                profile_scope!("gl.flush");
                self.device.gl().flush();
            }
        }

        self.composite_frame(
            frame,
            device_size,
            results,
            present_mode,
        );

        frame.has_been_rendered = true;
    }
4671
    /// Composites the rendered frame to the main framebuffer (simple/draw
    /// compositor) or finalizes native OS composition, when `device_size`
    /// is `Some`. When it is `None`, no present happens and a full redraw
    /// is forced for the next frame so partial-present state stays valid.
    fn composite_frame(
        &mut self,
        frame: &mut Frame,
        device_size: Option<DeviceIntSize>,
        results: &mut RenderResults,
        present_mode: Option<PartialPresentMode>,
    ) {
        profile_scope!("main target");

        if let Some(device_size) = device_size {
            results.stats.color_target_count += 1;
            // Hand the per-frame picture cache debug info over to the caller,
            // leaving a fresh empty one in its place.
            results.picture_cache_debug = mem::replace(
                &mut frame.composite_state.picture_cache_debug,
                PictureCacheDebugInfo::new(),
            );

            // Build an orthographic projection; on bottom-left-origin (GL)
            // surfaces the y axis is flipped by swapping top/bottom.
            let size = frame.device_rect.size().to_f32();
            let surface_origin_is_top_left = self.device.surface_origin_is_top_left();
            let (bottom, top) = if surface_origin_is_top_left {
              (0.0, size.height)
            } else {
              (size.height, 0.0)
            };

            let projection = Transform3D::ortho(
                0.0,
                size.width,
                bottom,
                top,
                self.device.ortho_near_plane(),
                self.device.ortho_far_plane(),
            );

            // Convert the device rect into framebuffer space (a unit-only
            // conversion; the scale factor is 1).
            let fb_scale = Scale::<_, _, FramebufferPixel>::new(1i32);
            let mut fb_rect = frame.device_rect * fb_scale;

            // Flip the rect vertically for bottom-left-origin surfaces.
            if !surface_origin_is_top_left {
                let h = fb_rect.height();
                fb_rect.min.y = device_size.height - fb_rect.max.y;
                fb_rect.max.y = fb_rect.min.y + h;
            }

            let draw_target = DrawTarget::Default {
                rect: fb_rect,
                total_size: device_size * fb_scale,
                surface_origin_is_top_left,
            };

            // If we have a native OS compositor, then make use of that interface
            // to specify how to composite each of the picture cache surfaces.
            match self.current_compositor_kind {
                CompositorKind::Native { .. } => {
                    // We have already queued surfaces for early native composition by this point.
                    // All that is left is to finally update any external native surfaces that were
                    // invalidated so that composition can complete.
                    self.update_external_native_surfaces(
                        &frame.composite_state.external_surfaces,
                        results,
                    );
                }
                CompositorKind::Draw { .. } => {
                    self.composite_simple(
                        &frame.composite_state,
                        draw_target,
                        &projection,
                        results,
                        present_mode,
                    );
                }
            }
        } else {
            // Rendering a frame without presenting it will confuse the partial
            // present logic, so force a full present for the next frame.
            self.force_redraw();
        }
    }
4748
    /// Returns a mutable handle to the debug renderer, if one is available.
    pub fn debug_renderer(&mut self) -> Option<&mut DebugRenderer> {
        self.debug.get_mut(&mut self.device)
    }
4752
    /// Returns the currently active debug flags.
    pub fn get_debug_flags(&self) -> DebugFlags {
        self.debug_flags
    }
4756
4757    pub fn set_debug_flags(&mut self, flags: DebugFlags) {
4758        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_TIME_QUERIES) {
4759            if enabled {
4760                self.gpu_profiler.enable_timers();
4761            } else {
4762                self.gpu_profiler.disable_timers();
4763            }
4764        }
4765        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_SAMPLE_QUERIES) {
4766            if enabled {
4767                self.gpu_profiler.enable_samplers();
4768            } else {
4769                self.gpu_profiler.disable_samplers();
4770            }
4771        }
4772
4773        self.device.set_use_batched_texture_uploads(flags.contains(DebugFlags::USE_BATCHED_TEXTURE_UPLOADS));
4774        self.device.set_use_draw_calls_for_texture_copy(flags.contains(DebugFlags::USE_DRAW_CALLS_FOR_TEXTURE_COPY));
4775
4776        self.debug_flags = flags;
4777    }
4778
    /// Sets the profiler overlay's UI configuration string.
    pub fn set_profiler_ui(&mut self, ui_str: &str) {
        self.profiler.set_ui(ui_str);
    }
4782
4783    fn draw_frame_debug_items(&mut self, items: &[DebugItem]) {
4784        if items.is_empty() {
4785            return;
4786        }
4787
4788        let debug_renderer = match self.debug.get_mut(&mut self.device) {
4789            Some(render) => render,
4790            None => return,
4791        };
4792
4793        for item in items {
4794            match item {
4795                DebugItem::Rect { rect, outer_color, inner_color } => {
4796                    debug_renderer.add_quad(
4797                        rect.min.x,
4798                        rect.min.y,
4799                        rect.max.x,
4800                        rect.max.y,
4801                        (*inner_color).into(),
4802                        (*inner_color).into(),
4803                    );
4804
4805                    debug_renderer.add_rect(
4806                        &rect.to_i32(),
4807                        (*outer_color).into(),
4808                    );
4809                }
4810                DebugItem::Text { ref msg, position, color } => {
4811                    debug_renderer.add_text(
4812                        position.x,
4813                        position.y,
4814                        msg,
4815                        (*color).into(),
4816                        None,
4817                    );
4818                }
4819            }
4820        }
4821    }
4822
4823    fn draw_render_target_debug(&mut self, draw_target: &DrawTarget) {
4824        if !self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) {
4825            return;
4826        }
4827
4828        let debug_renderer = match self.debug.get_mut(&mut self.device) {
4829            Some(render) => render,
4830            None => return,
4831        };
4832
4833        let textures = self.texture_resolver
4834            .texture_cache_map
4835            .values()
4836            .filter(|item| item.category == TextureCacheCategory::RenderTarget)
4837            .map(|item| &item.texture)
4838            .collect::<Vec<&Texture>>();
4839
4840        Self::do_debug_blit(
4841            &mut self.device,
4842            debug_renderer,
4843            textures,
4844            draw_target,
4845            0,
4846            &|_| [0.0, 1.0, 0.0, 1.0], // Use green for all RTs.
4847        );
4848    }
4849
    /// Draws the zoom debug overlay: magnifies a 64x64 region around the
    /// cursor into a 1024x1024 view in the bottom-right corner of the
    /// framebuffer, using nearest filtering so individual pixels are visible.
    /// Active only when `ZOOM_DBG` is set and a debug renderer is available.
    fn draw_zoom_debug(
        &mut self,
        device_size: DeviceIntSize,
    ) {
        if !self.debug_flags.contains(DebugFlags::ZOOM_DBG) {
            return;
        }

        let debug_renderer = match self.debug.get_mut(&mut self.device) {
            Some(render) => render,
            None => return,
        };

        let source_size = DeviceIntSize::new(64, 64);
        let target_size = DeviceIntSize::new(1024, 1024);

        // Center the source region on the cursor, clamped so it stays fully
        // on screen.
        let source_origin = DeviceIntPoint::new(
            (self.cursor_position.x - source_size.width / 2)
                .min(device_size.width - source_size.width)
                .max(0),
            (self.cursor_position.y - source_size.height / 2)
                .min(device_size.height - source_size.height)
                .max(0),
        );

        let source_rect = DeviceIntRect::from_origin_and_size(
            source_origin,
            source_size,
        );

        // Place the magnified view 64px in from the bottom-right corner.
        let target_rect = DeviceIntRect::from_origin_and_size(
            DeviceIntPoint::new(
                device_size.width - target_size.width - 64,
                device_size.height - target_size.height - 64,
            ),
            target_size,
        );

        let texture_rect = FramebufferIntRect::from_size(
            source_rect.size().cast_unit(),
        );

        // Outline the magnified view in red.
        debug_renderer.add_rect(
            &target_rect.inflate(1, 1),
            debug_colors::RED.into(),
        );

        // Lazily allocate the intermediate texture that holds the source
        // region; reused across frames.
        if self.zoom_debug_texture.is_none() {
            let texture = self.device.create_texture(
                ImageBufferKind::Texture2D,
                ImageFormat::BGRA8,
                source_rect.width(),
                source_rect.height(),
                TextureFilter::Nearest,
                Some(RenderTargetInfo { has_depth: false }),
            );

            self.zoom_debug_texture = Some(texture);
        }

        // Copy frame buffer into the zoom texture
        let read_target = DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left());
        self.device.blit_render_target(
            read_target.into(),
            read_target.to_framebuffer_rect(source_rect),
            DrawTarget::from_texture(
                self.zoom_debug_texture.as_ref().unwrap(),
                false,
            ),
            texture_rect,
            TextureFilter::Nearest,
        );

        // Draw the zoom texture back to the framebuffer
        self.device.blit_render_target(
            ReadTarget::from_texture(
                self.zoom_debug_texture.as_ref().unwrap(),
            ),
            texture_rect,
            read_target,
            read_target.to_framebuffer_rect(target_rect),
            TextureFilter::Nearest,
        );
    }
4934
4935    fn draw_texture_cache_debug(&mut self, draw_target: &DrawTarget) {
4936        if !self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
4937            return;
4938        }
4939
4940        let debug_renderer = match self.debug.get_mut(&mut self.device) {
4941            Some(render) => render,
4942            None => return,
4943        };
4944
4945        let textures = self.texture_resolver
4946            .texture_cache_map
4947            .values()
4948            .filter(|item| item.category == TextureCacheCategory::Atlas)
4949            .map(|item| &item.texture)
4950            .collect::<Vec<&Texture>>();
4951
4952        fn select_color(texture: &Texture) -> [f32; 4] {
4953            if texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE) {
4954                [1.0, 0.5, 0.0, 1.0] // Orange for shared.
4955            } else {
4956                [1.0, 0.0, 1.0, 1.0] // Fuchsia for standalone.
4957            }
4958        }
4959
4960        Self::do_debug_blit(
4961            &mut self.device,
4962            debug_renderer,
4963            textures,
4964            draw_target,
4965            if self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) { 544 } else { 0 },
4966            &select_color,
4967        );
4968    }
4969
4970    fn do_debug_blit(
4971        device: &mut Device,
4972        debug_renderer: &mut DebugRenderer,
4973        mut textures: Vec<&Texture>,
4974        draw_target: &DrawTarget,
4975        bottom: i32,
4976        select_color: &dyn Fn(&Texture) -> [f32; 4],
4977    ) {
4978        let mut spacing = 16;
4979        let mut size = 512;
4980
4981        let device_size = draw_target.dimensions();
4982        let fb_width = device_size.width;
4983        let fb_height = device_size.height;
4984        let surface_origin_is_top_left = draw_target.surface_origin_is_top_left();
4985
4986        let num_textures = textures.len() as i32;
4987
4988        if num_textures * (size + spacing) > fb_width {
4989            let factor = fb_width as f32 / (num_textures * (size + spacing)) as f32;
4990            size = (size as f32 * factor) as i32;
4991            spacing = (spacing as f32 * factor) as i32;
4992        }
4993
4994        let text_height = 14; // Visually approximated.
4995        let text_margin = 1;
4996        let tag_height = text_height + text_margin * 2;
4997        let tag_y = fb_height - (bottom + spacing + tag_height);
4998        let image_y = tag_y - size;
4999
5000        // Sort the display by size (in bytes), so that left-to-right is
5001        // largest-to-smallest.
5002        //
5003        // Note that the vec here is in increasing order, because the elements
5004        // get drawn right-to-left.
5005        textures.sort_by_key(|t| t.size_in_bytes());
5006
5007        let mut i = 0;
5008        for texture in textures.iter() {
5009            let dimensions = texture.get_dimensions();
5010            let src_rect = FramebufferIntRect::from_size(
5011                FramebufferIntSize::new(dimensions.width as i32, dimensions.height as i32),
5012            );
5013
5014            let x = fb_width - (spacing + size) * (i as i32 + 1);
5015
5016            // If we have more targets than fit on one row in screen, just early exit.
5017            if x > fb_width {
5018                return;
5019            }
5020
5021            // Draw the info tag.
5022            let tag_rect = rect(x, tag_y, size, tag_height).to_box2d();
5023            let tag_color = select_color(texture);
5024            device.clear_target(
5025                Some(tag_color),
5026                None,
5027                Some(draw_target.to_framebuffer_rect(tag_rect)),
5028            );
5029
5030            // Draw the dimensions onto the tag.
5031            let dim = texture.get_dimensions();
5032            let text_rect = tag_rect.inflate(-text_margin, -text_margin);
5033            debug_renderer.add_text(
5034                text_rect.min.x as f32,
5035                text_rect.max.y as f32, // Top-relative.
5036                &format!("{}x{}", dim.width, dim.height),
5037                ColorU::new(0, 0, 0, 255),
5038                Some(tag_rect.to_f32())
5039            );
5040
5041            // Blit the contents of the texture.
5042            let dest_rect = draw_target.to_framebuffer_rect(rect(x, image_y, size, size).to_box2d());
5043            let read_target = ReadTarget::from_texture(texture);
5044
5045            if surface_origin_is_top_left {
5046                device.blit_render_target(
5047                    read_target,
5048                    src_rect,
5049                    *draw_target,
5050                    dest_rect,
5051                    TextureFilter::Linear,
5052                );
5053            } else {
5054                 // Invert y.
5055                 device.blit_render_target_invert_y(
5056                    read_target,
5057                    src_rect,
5058                    *draw_target,
5059                    dest_rect,
5060                );
5061            }
5062            i += 1;
5063        }
5064    }
5065
5066    fn draw_epoch_debug(&mut self) {
5067        if !self.debug_flags.contains(DebugFlags::EPOCHS) {
5068            return;
5069        }
5070
5071        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5072            Some(render) => render,
5073            None => return,
5074        };
5075
5076        let dy = debug_renderer.line_height();
5077        let x0: f32 = 30.0;
5078        let y0: f32 = 30.0;
5079        let mut y = y0;
5080        let mut text_width = 0.0;
5081        for ((pipeline, document_id), epoch) in  &self.pipeline_info.epochs {
5082            y += dy;
5083            let w = debug_renderer.add_text(
5084                x0, y,
5085                &format!("({:?}, {:?}): {:?}", pipeline, document_id, epoch),
5086                ColorU::new(255, 255, 0, 255),
5087                None,
5088            ).size.width;
5089            text_width = f32::max(text_width, w);
5090        }
5091
5092        let margin = 10.0;
5093        debug_renderer.add_quad(
5094            x0 - margin,
5095            y0 - margin,
5096            x0 + text_width + margin,
5097            y + margin,
5098            ColorU::new(25, 25, 25, 200),
5099            ColorU::new(51, 51, 51, 200),
5100        );
5101    }
5102
5103    fn draw_gpu_cache_debug(&mut self, device_size: DeviceIntSize) {
5104        if !self.debug_flags.contains(DebugFlags::GPU_CACHE_DBG) {
5105            return;
5106        }
5107
5108        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5109            Some(render) => render,
5110            None => return,
5111        };
5112
5113        let (x_off, y_off) = (30f32, 30f32);
5114        let height = self.gpu_cache_texture.get_height()
5115            .min(device_size.height - (y_off as i32) * 2) as usize;
5116        debug_renderer.add_quad(
5117            x_off,
5118            y_off,
5119            x_off + MAX_VERTEX_TEXTURE_WIDTH as f32,
5120            y_off + height as f32,
5121            ColorU::new(80, 80, 80, 80),
5122            ColorU::new(80, 80, 80, 80),
5123        );
5124
5125        let upper = self.gpu_cache_debug_chunks.len().min(height);
5126        for chunk in self.gpu_cache_debug_chunks[0..upper].iter().flatten() {
5127            let color = ColorU::new(250, 0, 0, 200);
5128            debug_renderer.add_quad(
5129                x_off + chunk.address.u as f32,
5130                y_off + chunk.address.v as f32,
5131                x_off + chunk.address.u as f32 + chunk.size as f32,
5132                y_off + chunk.address.v as f32 + 1.0,
5133                color,
5134                color,
5135            );
5136        }
5137    }
5138
    /// Pass-through to `Device::read_pixels_into`, used by Gecko's WR bindings.
    ///
    /// Reads back the pixels of `rect` in the given `format` into the
    /// caller-provided `output` buffer.
    pub fn read_pixels_into(&mut self, rect: FramebufferIntRect, format: ImageFormat, output: &mut [u8]) {
        self.device.read_pixels_into(rect, format, output);
    }
5143
5144    pub fn read_pixels_rgba8(&mut self, rect: FramebufferIntRect) -> Vec<u8> {
5145        let mut pixels = vec![0; (rect.area() * 4) as usize];
5146        self.device.read_pixels_into(rect, ImageFormat::RGBA8, &mut pixels);
5147        pixels
5148    }
5149
    // De-initialize the Renderer safely, assuming the GL is still alive and active.
    // Consumes `self`, so the renderer cannot be used afterwards.
    pub fn deinit(mut self) {
        //Note: this is a fake frame, only needed because texture deletion is required to happen inside a frame
        self.device.begin_frame();
        // If we are using a native compositor, ensure that any remaining native
        // surfaces are freed.
        if let CompositorConfig::Native { mut compositor, .. } = self.compositor_config {
            for id in self.allocated_native_surfaces.drain() {
                compositor.destroy_surface(id);
            }
            // Destroy the debug overlay surface, if currently allocated.
            if self.debug_overlay_state.current_size.is_some() {
                compositor.destroy_surface(NativeSurfaceId::DEBUG_OVERLAY);
            }
            compositor.deinit();
        }
        // Tear down GPU-side resources owned by the renderer's sub-systems.
        self.gpu_cache_texture.deinit(&mut self.device);
        if let Some(dither_matrix_texture) = self.dither_matrix_texture {
            self.device.delete_texture(dither_matrix_texture);
        }
        if let Some(zoom_debug_texture) = self.zoom_debug_texture {
            self.device.delete_texture(zoom_debug_texture);
        }
        for textures in self.vertex_data_textures.drain(..) {
            textures.deinit(&mut self.device);
        }
        self.texture_upload_pbo_pool.deinit(&mut self.device);
        self.staging_texture_pool.delete_textures(&mut self.device);
        self.texture_resolver.deinit(&mut self.device);
        self.vaos.deinit(&mut self.device);
        self.debug.deinit(&mut self.device);

        // Shaders are shared via Rc; only deinit them if we hold the last
        // reference, otherwise another owner is responsible for cleanup.
        if let Ok(shaders) = Rc::try_unwrap(self.shaders) {
            shaders.into_inner().deinit(&mut self.device);
        }

        if let Some(async_screenshots) = self.async_screenshots.take() {
            async_screenshots.deinit(&mut self.device);
        }

        if let Some(async_frame_recorder) = self.async_frame_recorder.take() {
            async_frame_recorder.deinit(&mut self.device);
        }

        // Capture/replay-only resources.
        #[cfg(feature = "capture")]
        self.device.delete_fbo(self.read_fbo);
        #[cfg(feature = "replay")]
        for (_, ext) in self.owned_external_images {
            self.device.delete_external_texture(ext);
        }
        self.device.end_frame();
    }
5202
    /// Measures the heap allocation behind `ptr` using the malloc-size-of
    /// ops supplied at initialization.
    ///
    /// Panics if `size_of_ops` was not configured (see `RendererOptions`).
    fn size_of<T>(&self, ptr: *const T) -> usize {
        let ops = self.size_of_ops.as_ref().unwrap();
        // SAFETY: callers in this file pass pointers obtained from live heap
        // allocations (e.g. `Vec::as_ptr` in `report_memory`); the hook
        // queries allocator metadata for that pointer.
        unsafe { ops.malloc_size_of(ptr) }
    }
5207
5208    /// Collects a memory report.
5209    pub fn report_memory(&self, swgl: *mut c_void) -> MemoryReport {
5210        let mut report = MemoryReport::default();
5211
5212        // GPU cache CPU memory.
5213        self.gpu_cache_texture.report_memory_to(&mut report, self.size_of_ops.as_ref().unwrap());
5214
5215        self.staging_texture_pool.report_memory_to(&mut report, self.size_of_ops.as_ref().unwrap());
5216
5217        // Render task CPU memory.
5218        for (_id, doc) in &self.active_documents {
5219            report.render_tasks += self.size_of(doc.frame.render_tasks.tasks.as_ptr());
5220            report.render_tasks += self.size_of(doc.frame.render_tasks.task_data.as_ptr());
5221        }
5222
5223        // Vertex data GPU memory.
5224        for textures in &self.vertex_data_textures {
5225            report.vertex_data_textures += textures.size_in_bytes();
5226        }
5227
5228        // Texture cache and render target GPU memory.
5229        report += self.texture_resolver.report_memory();
5230
5231        // Texture upload PBO memory.
5232        report += self.texture_upload_pbo_pool.report_memory();
5233
5234        // Textures held internally within the device layer.
5235        report += self.device.report_memory(self.size_of_ops.as_ref().unwrap(), swgl);
5236
5237        report
5238    }
5239
5240    // Sets the blend mode. Blend is unconditionally set if the "show overdraw" debugging mode is
5241    // enabled.
5242    fn set_blend(&mut self, mut blend: bool, framebuffer_kind: FramebufferKind) {
5243        if framebuffer_kind == FramebufferKind::Main &&
5244                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5245            blend = true
5246        }
5247        self.device.set_blend(blend)
5248    }
5249
5250    fn set_blend_mode_multiply(&mut self, framebuffer_kind: FramebufferKind) {
5251        if framebuffer_kind == FramebufferKind::Main &&
5252                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5253            self.device.set_blend_mode_show_overdraw();
5254        } else {
5255            self.device.set_blend_mode_multiply();
5256        }
5257    }
5258
5259    fn set_blend_mode_premultiplied_alpha(&mut self, framebuffer_kind: FramebufferKind) {
5260        if framebuffer_kind == FramebufferKind::Main &&
5261                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5262            self.device.set_blend_mode_show_overdraw();
5263        } else {
5264            self.device.set_blend_mode_premultiplied_alpha();
5265        }
5266    }
5267
5268    fn set_blend_mode_subpixel_with_bg_color_pass1(&mut self, framebuffer_kind: FramebufferKind) {
5269        if framebuffer_kind == FramebufferKind::Main &&
5270                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5271            self.device.set_blend_mode_show_overdraw();
5272        } else {
5273            self.device.set_blend_mode_subpixel_with_bg_color_pass1();
5274        }
5275    }
5276
5277    fn set_blend_mode_subpixel_with_bg_color_pass2(&mut self, framebuffer_kind: FramebufferKind) {
5278        if framebuffer_kind == FramebufferKind::Main &&
5279                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5280            self.device.set_blend_mode_show_overdraw();
5281        } else {
5282            self.device.set_blend_mode_subpixel_with_bg_color_pass2();
5283        }
5284    }
5285
5286    /// Clears the texture with a given color.
5287    fn clear_texture(&mut self, texture: &Texture, color: [f32; 4]) {
5288        self.device.bind_draw_target(DrawTarget::from_texture(
5289            &texture,
5290            false,
5291        ));
5292        self.device.clear_target(Some(color), None, None);
5293    }
5294}
5295
/// Allows callers to hook in at certain points of the async scene build. These
/// functions are all called from the scene builder thread.
pub trait SceneBuilderHooks {
    /// This is called exactly once, when the scene builder thread is started
    /// and before it processes anything.
    fn register(&self);
    /// This is called before each scene build starts.
    fn pre_scene_build(&self);
    /// This is called before each scene swap occurs.
    fn pre_scene_swap(&self, scenebuild_time: u64);
    /// This is called after each scene swap occurs. The PipelineInfo contains
    /// the updated epochs and pipelines removed in the new scene compared to
    /// the old scene.
    ///
    /// NOTE(review): `document_id` is in fact a list of affected documents;
    /// `&[DocumentId]` would be the idiomatic parameter type, but changing it
    /// would break existing implementors.
    fn post_scene_swap(&self, document_id: &Vec<DocumentId>, info: PipelineInfo, sceneswap_time: u64);
    /// This is called after a resource update operation on the scene builder
    /// thread, in the case where resource updates were applied without a scene
    /// build.
    fn post_resource_update(&self, document_ids: &Vec<DocumentId>);
    /// This is called after a scene build completes without any changes being
    /// made. We guarantee that each pre_scene_build call will be matched with
    /// exactly one of post_scene_swap, post_resource_update or
    /// post_empty_scene_build.
    fn post_empty_scene_build(&self);
    /// This is a generic callback which provides an opportunity to run code
    /// on the scene builder thread. This is called as part of the main message
    /// loop of the scene builder thread, but outside of any specific message
    /// handler.
    fn poke(&self);
    /// This is called exactly once, when the scene builder thread is about to
    /// terminate.
    fn deregister(&self);
}
5328
/// Allows callers to hook into the main render_backend loop and provide
/// additional frame ops for generate_frame transactions. These functions
/// are all called from the render backend thread.
pub trait AsyncPropertySampler {
    /// This is called exactly once, when the render backend thread is started
    /// and before it processes anything.
    fn register(&self);
    /// This is called for each transaction with the generate_frame flag set
    /// (i.e. that will trigger a render). The list of frame messages returned
    /// are processed as though they were part of the original transaction.
    fn sample(&self, document_id: DocumentId, generated_frame_id: Option<u64>) -> Vec<FrameMsg>;
    /// This is called exactly once, when the render backend thread is about to
    /// terminate.
    fn deregister(&self);
}
5344
bitflags! {
    /// Flags that control how shaders are pre-cached, if at all.
    #[derive(Default)]
    pub struct ShaderPrecacheFlags: u32 {
        /// Needed for const initialization
        const EMPTY                 = 0;

        /// Only start async compile
        const ASYNC_COMPILE         = 1 << 2;

        /// Do a full compile/link during startup
        const FULL_COMPILE          = 1 << 3;

        // NOTE(review): bits 0 and 1 are skipped — confirm whether they are
        // reserved (e.g. for historical or external values) before reusing.
    }
}
5359
/// Construction-time configuration for the renderer. Obtain a baseline with
/// `RendererOptions::default()` and override individual fields as needed.
pub struct RendererOptions {
    pub resource_override_path: Option<PathBuf>,
    /// Whether to use shaders that have been optimized at build time.
    pub use_optimized_shaders: bool,
    pub enable_aa: bool,
    pub enable_dithering: bool,
    pub max_recorded_profiles: usize,
    pub precache_flags: ShaderPrecacheFlags,
    /// Enable sub-pixel anti-aliasing if a fast implementation is available.
    pub enable_subpixel_aa: bool,
    /// Enable sub-pixel anti-aliasing if it requires a slow implementation.
    pub force_subpixel_aa: bool,
    pub clear_color: ColorF,
    pub enable_clear_scissor: bool,
    pub max_internal_texture_size: Option<i32>,
    pub image_tiling_threshold: i32,
    pub upload_method: UploadMethod,
    /// The default size in bytes for PBOs used to upload texture data.
    pub upload_pbo_default_size: usize,
    pub workers: Option<Arc<ThreadPool>>,
    pub enable_multithreading: bool,
    pub blob_image_handler: Option<Box<dyn BlobImageHandler>>,
    pub crash_annotator: Option<Box<dyn CrashAnnotator>>,
    pub size_of_op: Option<VoidPtrToSizeFn>,
    pub enclosing_size_of_op: Option<VoidPtrToSizeFn>,
    pub cached_programs: Option<Rc<ProgramCache>>,
    pub debug_flags: DebugFlags,
    pub renderer_id: Option<u64>,
    pub scene_builder_hooks: Option<Box<dyn SceneBuilderHooks + Send>>,
    pub sampler: Option<Box<dyn AsyncPropertySampler + Send>>,
    pub chase_primitive: ChasePrimitive,
    pub support_low_priority_transactions: bool,
    pub namespace_alloc_by_client: bool,
    pub testing: bool,
    /// Set to true if this GPU supports hardware fast clears as a performance
    /// optimization. Likely requires benchmarking on various GPUs to see if
    /// it is a performance win. The default is false, which tends to be best
    /// performance on lower end / integrated GPUs.
    pub gpu_supports_fast_clears: bool,
    pub allow_dual_source_blending: bool,
    pub allow_advanced_blend_equation: bool,
    /// If true, allow textures to be initialized with glTexStorage.
    /// This affects VRAM consumption and data upload paths.
    pub allow_texture_storage_support: bool,
    /// If true, we allow the data uploaded in a different format from the
    /// one expected by the driver, pretending the format is matching, and
    /// swizzling the components on all the shader sampling.
    pub allow_texture_swizzling: bool,
    /// Use `ps_clear` shader with batched quad rendering to clear the rects
    /// in texture cache and picture cache tasks.
    /// This helps to work around some Intel drivers
    /// that incorrectly synchronize clears to following draws.
    pub clear_caches_with_quads: bool,
    /// Output the source of the shader with the given name.
    pub dump_shader_source: Option<String>,
    pub surface_origin_is_top_left: bool,
    /// The configuration options defining how WR composites the final scene.
    pub compositor_config: CompositorConfig,
    pub enable_gpu_markers: bool,
    /// If true, panic whenever a GL error occurs. This has a significant
    /// performance impact, so only use when debugging specific problems!
    pub panic_on_gl_error: bool,
    pub picture_tile_size: Option<DeviceIntSize>,
    pub texture_cache_config: TextureCacheConfig,
    /// If true, we'll use instanced vertex attributes. Each instance is a quad.
    /// If false, we'll duplicate the instance attributes per vertex and issue
    /// regular indexed draws instead.
    pub enable_instancing: bool,
    /// If true, we'll reject contexts backed by a software rasterizer, except
    /// Software WebRender.
    pub reject_software_rasterizer: bool,
    /// If enabled, pinch-zoom will apply the zoom factor during compositing
    /// of picture cache tiles. This is higher performance (tiles are not
    /// re-rasterized during zoom) but lower quality result. For most display
    /// items, if the zoom factor is relatively small, bilinear filtering should
    /// make the result look quite close to the high-quality zoom, except for glyphs.
    pub low_quality_pinch_zoom: bool,
}
5438
impl RendererOptions {
    /// Number of batches to look back in history for adding the current
    /// transparent instance into.
    const BATCH_LOOKBACK_COUNT: usize = 10;

    /// Since we are re-initializing the instance buffers on every draw call,
    /// the driver has to internally manage PBOs in flight.
    /// It's typically done by bucketing up to a specific limit, and then
    /// just individually managing the largest buffers.
    /// Having a limit here allows the drivers to more easily manage
    /// the PBOs for us.
    // NOTE(review): these tuning constants are consumed elsewhere in this
    // module; they arguably belong on `Renderer` rather than the options type.
    const MAX_INSTANCE_BUFFER_SIZE: usize = 0x20000; // actual threshold in macOS GL drivers
}
5452
impl Default for RendererOptions {
    // Note: fields below are listed in a slightly different order than the
    // struct declaration (e.g. `debug_flags`); keep both lists complete when
    // adding a field.
    fn default() -> Self {
        RendererOptions {
            resource_override_path: None,
            use_optimized_shaders: false,
            enable_aa: true,
            enable_dithering: false,
            debug_flags: DebugFlags::empty(),
            max_recorded_profiles: 0,
            precache_flags: ShaderPrecacheFlags::empty(),
            enable_subpixel_aa: false,
            force_subpixel_aa: false,
            clear_color: ColorF::new(1.0, 1.0, 1.0, 1.0),
            enable_clear_scissor: true,
            max_internal_texture_size: None,
            image_tiling_threshold: 4096,
            // This is best as `Immediate` on Angle, or `Pixelbuffer(Dynamic)` on GL,
            // but we are unable to make this decision here, so picking the reasonable medium.
            upload_method: UploadMethod::PixelBuffer(ONE_TIME_USAGE_HINT),
            upload_pbo_default_size: 512 * 512 * 4,
            workers: None,
            enable_multithreading: true,
            blob_image_handler: None,
            crash_annotator: None,
            size_of_op: None,
            enclosing_size_of_op: None,
            renderer_id: None,
            cached_programs: None,
            scene_builder_hooks: None,
            sampler: None,
            chase_primitive: ChasePrimitive::Nothing,
            support_low_priority_transactions: false,
            namespace_alloc_by_client: false,
            testing: false,
            gpu_supports_fast_clears: false,
            allow_dual_source_blending: true,
            allow_advanced_blend_equation: false,
            allow_texture_storage_support: true,
            allow_texture_swizzling: true,
            clear_caches_with_quads: true,
            dump_shader_source: None,
            surface_origin_is_top_left: false,
            compositor_config: CompositorConfig::default(),
            enable_gpu_markers: true,
            panic_on_gl_error: false,
            picture_tile_size: None,
            texture_cache_config: TextureCacheConfig::DEFAULT,
            // Disabling instancing means more vertex data to upload and potentially
            // process by the vertex shaders.
            enable_instancing: true,
            reject_software_rasterizer: false,
            low_quality_pinch_zoom: false,
        }
    }
}
5508
/// The cumulative times spent in each painting phase to generate this frame.
#[derive(Debug, Default)]
pub struct FullFrameStats {
    /// True if a full display list was processed for this frame; `merge`
    /// ORs this flag across frames.
    pub full_display_list: bool,
    // Per-phase timings. All four are summed by `total()` and added
    // element-wise by `merge()`.
    pub gecko_display_list_time: f64,
    pub wr_display_list_time: f64,
    pub scene_build_time: f64,
    pub frame_build_time: f64,
}
5518
5519impl FullFrameStats {
5520    pub fn merge(&self, other: &FullFrameStats) -> Self {
5521        Self {
5522            full_display_list: self.full_display_list || other.full_display_list,
5523            gecko_display_list_time: self.gecko_display_list_time + other.gecko_display_list_time,
5524            wr_display_list_time: self.wr_display_list_time + other.wr_display_list_time,
5525            scene_build_time: self.scene_build_time + other.scene_build_time,
5526            frame_build_time: self.frame_build_time + other.frame_build_time
5527        }
5528    }
5529
5530    pub fn total(&self) -> f64 {
5531      self.gecko_display_list_time + self.wr_display_list_time + self.scene_build_time + self.frame_build_time
5532    }
5533}
5534
/// Some basic statistics about the rendered scene, used in Gecko, as
/// well as in wrench reftests to ensure that tests are batching and/or
/// allocating on render targets as we expect them to.
// repr(C) because this struct crosses the FFI boundary to Gecko.
#[repr(C)]
#[derive(Debug, Default)]
pub struct RendererStats {
    pub total_draw_calls: usize,
    pub alpha_target_count: usize,
    pub color_target_count: usize,
    pub texture_upload_mb: f64,
    pub resource_upload_time: f64,
    pub gpu_cache_upload_time: f64,
    // The following timing/flag fields are populated from `FullFrameStats`
    // via `RendererStats::merge`.
    pub gecko_display_list_time: f64,
    pub wr_display_list_time: f64,
    pub scene_build_time: f64,
    pub frame_build_time: f64,
    pub full_display_list: bool,
    pub full_paint: bool,
}
5554
5555impl RendererStats {
5556    pub fn merge(&mut self, stats: &FullFrameStats) {
5557        self.gecko_display_list_time = stats.gecko_display_list_time;
5558        self.wr_display_list_time = stats.wr_display_list_time;
5559        self.scene_build_time = stats.scene_build_time;
5560        self.frame_build_time = stats.frame_build_time;
5561        self.full_display_list = stats.full_display_list;
5562        self.full_paint = true;
5563    }
5564}
5565
/// Return type from render(), which contains some repr(C) statistics as well as
/// some non-repr(C) data.
#[derive(Debug, Default)]
pub struct RenderResults {
    /// Statistics about the frame that was rendered.
    pub stats: RendererStats,

    /// A list of the device dirty rects that were updated
    /// this frame.
    /// TODO(gw): This is an initial interface, likely to change in future.
    /// TODO(gw): The dirty rects here are currently only useful when scrolling
    ///           is not occurring. They are still correct in the case of
    ///           scrolling, but will be very large (until we expose proper
    ///           OS compositor support where the dirty rects apply to a
    ///           specific picture cache slice / OS compositor surface).
    pub dirty_rects: Vec<DeviceIntRect>,

    /// Information about the state of picture cache tiles. This is only
    /// allocated and stored if config.testing is true (such as wrench)
    pub picture_cache_debug: PictureCacheDebugInfo,
}
5587
/// Serialized description of a captured texture. The pixel payload itself is
/// written to a separate raw file whose relative path is stored in `data`
/// (see `Renderer::save_texture` / `Renderer::load_texture`).
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainTexture {
    // Relative path of the raw texel dump, e.g. "textures/<name>.raw".
    data: String,
    size: DeviceIntSize,
    format: ImageFormat,
    filter: TextureFilter,
    has_depth: bool,
    category: Option<TextureCacheCategory>,
}
5599
5600
/// Top-level serialized renderer state for capture/replay: the GPU cache
/// texture plus every texture in the texture cache map.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainRenderer {
    device_size: Option<DeviceIntSize>,
    gpu_cache: PlainTexture,
    gpu_cache_frame_id: FrameId,
    textures: FastHashMap<CacheTextureId, PlainTexture>,
}
5610
/// Serialized list of external images referenced by a capture; written as
/// the "external_resources" entry by `save_capture`.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainExternalResources {
    images: Vec<ExternalCaptureImage>
}
5617
/// Payload for an external image restored during replay: either a live GL
/// texture id, or an owned copy of the raw bytes.
#[cfg(feature = "replay")]
enum CapturedExternalImageData {
    NativeTexture(gl::GLuint),
    Buffer(Arc<Vec<u8>>),
}
5623
/// Replay-only `ExternalImageHandler` that serves previously captured image
/// data, keyed by (external image id, channel index).
#[cfg(feature = "replay")]
struct DummyExternalImageHandler {
    data: FastHashMap<(ExternalImageId, u8), (CapturedExternalImageData, TexelRect)>,
}
5628
#[cfg(feature = "replay")]
impl ExternalImageHandler for DummyExternalImageHandler {
    /// Serves the captured payload for (key, channel). Panics (via indexing)
    /// if the image was not part of the capture.
    fn lock(&mut self, key: ExternalImageId, channel_index: u8, _rendering: ImageRendering) -> ExternalImage {
        let (captured_data, uv) = &self.data[&(key, channel_index)];
        let source = match captured_data {
            CapturedExternalImageData::NativeTexture(tid) => ExternalImageSource::NativeTexture(*tid),
            CapturedExternalImageData::Buffer(ref arc) => ExternalImageSource::RawData(&*arc),
        };
        ExternalImage { uv: *uv, source }
    }
    // Captured data needs no unlocking.
    fn unlock(&mut self, _key: ExternalImageId, _channel_index: u8) {}
}
5643
/// Pipeline state reported back after a scene swap: the current epoch of
/// every (pipeline, document) pair, plus the pipelines removed in the new
/// scene compared to the old one (see `SceneBuilderHooks::post_scene_swap`).
#[derive(Default)]
pub struct PipelineInfo {
    pub epochs: FastHashMap<(PipelineId, DocumentId), Epoch>,
    pub removed_pipelines: Vec<(PipelineId, DocumentId)>,
}
5649
5650impl Renderer {
5651    #[cfg(feature = "capture")]
5652    fn save_texture(
5653        texture: &Texture, category: Option<TextureCacheCategory>, name: &str, root: &PathBuf, device: &mut Device
5654    ) -> PlainTexture {
5655        use std::fs;
5656        use std::io::Write;
5657
5658        let short_path = format!("textures/{}.raw", name);
5659
5660        let bytes_per_pixel = texture.get_format().bytes_per_pixel();
5661        let read_format = texture.get_format();
5662        let rect_size = texture.get_dimensions();
5663
5664        let mut file = fs::File::create(root.join(&short_path))
5665            .expect(&format!("Unable to create {}", short_path));
5666        let bytes_per_texture = (rect_size.width * rect_size.height * bytes_per_pixel) as usize;
5667        let mut data = vec![0; bytes_per_texture];
5668
5669        //TODO: instead of reading from an FBO with `read_pixels*`, we could
5670        // read from textures directly with `get_tex_image*`.
5671
5672        let rect = device_size_as_framebuffer_size(rect_size).into();
5673
5674        device.attach_read_texture(texture);
5675        #[cfg(feature = "png")]
5676        {
5677            let mut png_data;
5678            let (data_ref, format) = match texture.get_format() {
5679                ImageFormat::RGBAF32 => {
5680                    png_data = vec![0; (rect_size.width * rect_size.height * 4) as usize];
5681                    device.read_pixels_into(rect, ImageFormat::RGBA8, &mut png_data);
5682                    (&png_data, ImageFormat::RGBA8)
5683                }
5684                fm => (&data, fm),
5685            };
5686            CaptureConfig::save_png(
5687                root.join(format!("textures/{}-{}.png", name, 0)),
5688                rect_size, format,
5689                None,
5690                data_ref,
5691            );
5692        }
5693        device.read_pixels_into(rect, read_format, &mut data);
5694        file.write_all(&data)
5695            .unwrap();
5696
5697        PlainTexture {
5698            data: short_path,
5699            size: rect_size,
5700            format: texture.get_format(),
5701            filter: texture.get_filter(),
5702            has_depth: texture.supports_depth(),
5703            category,
5704        }
5705    }
5706
5707    #[cfg(feature = "replay")]
5708    fn load_texture(
5709        target: ImageBufferKind,
5710        plain: &PlainTexture,
5711        rt_info: Option<RenderTargetInfo>,
5712        root: &PathBuf,
5713        device: &mut Device
5714    ) -> (Texture, Vec<u8>)
5715    {
5716        use std::fs::File;
5717        use std::io::Read;
5718
5719        let mut texels = Vec::new();
5720        File::open(root.join(&plain.data))
5721            .expect(&format!("Unable to open texture at {}", plain.data))
5722            .read_to_end(&mut texels)
5723            .unwrap();
5724
5725        let texture = device.create_texture(
5726            target,
5727            plain.format,
5728            plain.size.width,
5729            plain.size.height,
5730            plain.filter,
5731            rt_info,
5732        );
5733        device.upload_texture_immediate(&texture, &texels);
5734
5735        (texture, texels)
5736    }
5737
    /// Serializes GPU-side state into the capture directory described by
    /// `config`: external image payloads (if `EXTERNAL_RESOURCES` is set),
    /// plus the GPU cache and all cached textures (if `FRAME` is set), and
    /// finally a profiler stats dump.
    #[cfg(feature = "capture")]
    fn save_capture(
        &mut self,
        config: CaptureConfig,
        deferred_images: Vec<ExternalCaptureImage>,
    ) {
        use std::fs;
        use std::io::Write;
        use api::ExternalImageData;
        use crate::render_api::CaptureBits;

        let root = config.resource_root();

        self.device.begin_frame();
        let _gm = self.gpu_profiler.start_marker("read GPU data");
        self.device.bind_read_target_impl(self.read_fbo, DeviceIntPoint::zero());

        if config.bits.contains(CaptureBits::EXTERNAL_RESOURCES) && !deferred_images.is_empty() {
            info!("saving external images");
            // De-duplication maps: multiple external images may share the same
            // raw buffer (keyed by pointer) or GL texture (keyed by id); each
            // payload is written to disk only once and its path reused.
            let mut arc_map = FastHashMap::<*const u8, String>::default();
            let mut tex_map = FastHashMap::<u32, String>::default();
            let handler = self.external_image_handler
                .as_mut()
                .expect("Unable to lock the external image handler!");
            for def in &deferred_images {
                info!("\t{}", def.short_path);
                let ExternalImageData { id, channel_index, image_type } = def.external;
                // The image rendering parameter is irrelevant because no filtering happens during capturing.
                let ext_image = handler.lock(id, channel_index, ImageRendering::Auto);
                // `data` is Some(bytes) only the first time a payload is seen.
                let (data, short_path) = match ext_image.source {
                    ExternalImageSource::RawData(data) => {
                        let arc_id = arc_map.len() + 1;
                        match arc_map.entry(data.as_ptr()) {
                            Entry::Occupied(e) => {
                                // Buffer already written; reuse its path.
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                let short_path = format!("externals/d{}.raw", arc_id);
                                (Some(data.to_vec()), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::NativeTexture(gl_id) => {
                        let tex_id = tex_map.len() + 1;
                        match tex_map.entry(gl_id) {
                            Entry::Occupied(e) => {
                                // Texture already read back; reuse its path.
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                let target = match image_type {
                                    ExternalImageType::TextureHandle(target) => target,
                                    ExternalImageType::Buffer => unreachable!(),
                                };
                                info!("\t\tnative texture of target {:?}", target);
                                // Read the texture contents back via the read FBO.
                                self.device.attach_read_texture_external(gl_id, target);
                                let data = self.device.read_pixels(&def.descriptor);
                                let short_path = format!("externals/t{}.raw", tex_id);
                                (Some(data), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::Invalid => {
                        info!("\t\tinvalid source!");
                        (None, String::new())
                    }
                };
                if let Some(bytes) = data {
                    fs::File::create(root.join(&short_path))
                        .expect(&format!("Unable to create {}", short_path))
                        .write_all(&bytes)
                        .unwrap();
                    #[cfg(feature = "png")]
                    CaptureConfig::save_png(
                        root.join(&short_path).with_extension("png"),
                        def.descriptor.size,
                        def.descriptor.format,
                        def.descriptor.stride,
                        &bytes,
                    );
                }
                let plain = PlainExternalImage {
                    data: short_path,
                    external: def.external,
                    uv: ext_image.uv,
                };
                config.serialize_for_resource(&plain, &def.short_path);
            }
            // Unlock every image only after all payloads have been captured.
            for def in &deferred_images {
                handler.unlock(def.external.id, def.external.channel_index);
            }
            let plain_external = PlainExternalResources {
                images: deferred_images,
            };
            config.serialize_for_resource(&plain_external, "external_resources");
        }

        if config.bits.contains(CaptureBits::FRAME) {
            let path_textures = root.join("textures");
            if !path_textures.is_dir() {
                fs::create_dir(&path_textures).unwrap();
            }

            info!("saving GPU cache");
            self.update_gpu_cache(); // flush pending updates
            let mut plain_self = PlainRenderer {
                device_size: self.device_size,
                gpu_cache: Self::save_texture(
                    self.gpu_cache_texture.get_texture(),
                    None, "gpu", &root, &mut self.device,
                ),
                gpu_cache_frame_id: self.gpu_cache_frame_id,
                textures: FastHashMap::default(),
            };

            info!("saving cached textures");
            for (id, item) in &self.texture_resolver.texture_cache_map {
                let file_name = format!("cache-{}", plain_self.textures.len() + 1);
                info!("\t{}", file_name);
                let plain = Self::save_texture(&item.texture, Some(item.category), &file_name, &root, &mut self.device);
                plain_self.textures.insert(*id, plain);
            }

            config.serialize_for_resource(&plain_self, "renderer");
        }

        self.device.reset_read_target();
        self.device.end_frame();

        // Dump profiler counters alongside the capture (or a hint if the
        // relevant debug flags were not enabled).
        let mut stats_file = fs::File::create(config.root.join("profiler-stats.txt"))
            .expect(&format!("Unable to create profiler-stats.txt"));
        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
            self.profiler.dump_stats(&mut stats_file).unwrap();
        } else {
            writeln!(stats_file, "Turn on PROFILER_DBG or PROFILER_CAPTURE to get stats here!").unwrap();
        }

        info!("done.");
    }
5876
5877    #[cfg(feature = "replay")]
5878    fn load_capture(
5879        &mut self,
5880        config: CaptureConfig,
5881        plain_externals: Vec<PlainExternalImage>,
5882    ) {
5883        use std::{fs::File, io::Read};
5884
5885        info!("loading external buffer-backed images");
5886        assert!(self.texture_resolver.external_images.is_empty());
5887        let mut raw_map = FastHashMap::<String, Arc<Vec<u8>>>::default();
5888        let mut image_handler = DummyExternalImageHandler {
5889            data: FastHashMap::default(),
5890        };
5891
5892        let root = config.resource_root();
5893
5894        // Note: this is a `SCENE` level population of the external image handlers
5895        // It would put both external buffers and texture into the map.
5896        // But latter are going to be overwritten later in this function
5897        // if we are in the `FRAME` level.
5898        for plain_ext in plain_externals {
5899            let data = match raw_map.entry(plain_ext.data) {
5900                Entry::Occupied(e) => e.get().clone(),
5901                Entry::Vacant(e) => {
5902                    let mut buffer = Vec::new();
5903                    File::open(root.join(e.key()))
5904                        .expect(&format!("Unable to open {}", e.key()))
5905                        .read_to_end(&mut buffer)
5906                        .unwrap();
5907                    e.insert(Arc::new(buffer)).clone()
5908                }
5909            };
5910            let ext = plain_ext.external;
5911            let value = (CapturedExternalImageData::Buffer(data), plain_ext.uv);
5912            image_handler.data.insert((ext.id, ext.channel_index), value);
5913        }
5914
5915        if let Some(external_resources) = config.deserialize_for_resource::<PlainExternalResources, _>("external_resources") {
5916            info!("loading external texture-backed images");
5917            let mut native_map = FastHashMap::<String, gl::GLuint>::default();
5918            for ExternalCaptureImage { short_path, external, descriptor } in external_resources.images {
5919                let target = match external.image_type {
5920                    ExternalImageType::TextureHandle(target) => target,
5921                    ExternalImageType::Buffer => continue,
5922                };
5923                let plain_ext = config.deserialize_for_resource::<PlainExternalImage, _>(&short_path)
5924                    .expect(&format!("Unable to read {}.ron", short_path));
5925                let key = (external.id, external.channel_index);
5926
5927                let tid = match native_map.entry(plain_ext.data) {
5928                    Entry::Occupied(e) => e.get().clone(),
5929                    Entry::Vacant(e) => {
5930                        let plain_tex = PlainTexture {
5931                            data: e.key().clone(),
5932                            size: descriptor.size,
5933                            format: descriptor.format,
5934                            filter: TextureFilter::Linear,
5935                            has_depth: false,
5936                            category: None,
5937                        };
5938                        let t = Self::load_texture(
5939                            target,
5940                            &plain_tex,
5941                            None,
5942                            &root,
5943                            &mut self.device
5944                        );
5945                        let extex = t.0.into_external();
5946                        self.owned_external_images.insert(key, extex.clone());
5947                        e.insert(extex.internal_id()).clone()
5948                    }
5949                };
5950
5951                let value = (CapturedExternalImageData::NativeTexture(tid), plain_ext.uv);
5952                image_handler.data.insert(key, value);
5953            }
5954        }
5955
5956        self.device.begin_frame();
5957        self.gpu_cache_texture.remove_texture(&mut self.device);
5958
5959        if let Some(renderer) = config.deserialize_for_resource::<PlainRenderer, _>("renderer") {
5960            info!("loading cached textures");
5961            self.device_size = renderer.device_size;
5962
5963            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
5964                self.device.delete_texture(item.texture);
5965            }
5966            for (id, texture) in renderer.textures {
5967                info!("\t{}", texture.data);
5968                let target = ImageBufferKind::Texture2D;
5969                let t = Self::load_texture(
5970                    target,
5971                    &texture,
5972                    Some(RenderTargetInfo { has_depth: texture.has_depth }),
5973                    &root,
5974                    &mut self.device
5975                );
5976                self.texture_resolver.texture_cache_map.insert(id, CacheTexture {
5977                    texture: t.0,
5978                    category: texture.category.unwrap_or(TextureCacheCategory::Standalone),
5979                });
5980            }
5981
5982            info!("loading gpu cache");
5983            let (t, gpu_cache_data) = Self::load_texture(
5984                ImageBufferKind::Texture2D,
5985                &renderer.gpu_cache,
5986                Some(RenderTargetInfo { has_depth: false }),
5987                &root,
5988                &mut self.device,
5989            );
5990            self.gpu_cache_texture.load_from_data(t, gpu_cache_data);
5991            self.gpu_cache_frame_id = renderer.gpu_cache_frame_id;
5992        } else {
5993            info!("loading cached textures");
5994            self.device.begin_frame();
5995            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
5996                self.device.delete_texture(item.texture);
5997            }
5998        }
5999        self.device.end_frame();
6000
6001        self.external_image_handler = Some(Box::new(image_handler) as Box<_>);
6002        info!("done.");
6003    }
6004}
6005
/// Identifies whether a draw target is the window's main framebuffer or
/// some other render target.
#[derive(Copy, Clone, PartialEq)]
enum FramebufferKind {
    /// The default framebuffer of the window being rendered to.
    Main,
    /// Any other (non-main) framebuffer.
    Other,
}
6011
6012fn should_skip_batch(kind: &BatchKind, flags: DebugFlags) -> bool {
6013    match kind {
6014        BatchKind::TextRun(_) => {
6015            flags.contains(DebugFlags::DISABLE_TEXT_PRIMS)
6016        }
6017        BatchKind::Brush(BrushBatchKind::LinearGradient) => {
6018            flags.contains(DebugFlags::DISABLE_GRADIENT_PRIMS)
6019        }
6020        _ => false,
6021    }
6022}
6023
6024impl CompositeState {
6025    /// Use the client provided native compositor interface to add all picture
6026    /// cache tiles to the OS compositor
6027    fn composite_native(
6028        &self,
6029        clear_color: ColorF,
6030        dirty_rects: &[DeviceIntRect],
6031        compositor: &mut dyn Compositor,
6032    ) {
6033        // Add each surface to the visual tree. z-order is implicit based on
6034        // order added. Offset and clip rect apply to all tiles within this
6035        // surface.
6036        for surface in &self.descriptor.surfaces {
6037            compositor.add_surface(
6038                surface.surface_id.expect("bug: no native surface allocated"),
6039                surface.transform,
6040                surface.clip_rect.to_i32(),
6041                surface.image_rendering,
6042            );
6043        }
6044        compositor.start_compositing(clear_color, dirty_rects, &[]);
6045    }
6046}
6047
// Fix: gate the test module on `cfg(test)` so it (and its `use` items) are
// only compiled for test builds, per standard Rust test organization.
#[cfg(test)]
mod tests {
    /// Verifies `BufferDamageTracker` damage accumulation across buffer ages:
    /// untracked ages return `None`, tracked ages return the union of the
    /// dirty rects pushed since that age.
    #[test]
    fn test_buffer_damage_tracker() {
        use super::BufferDamageTracker;
        use api::units::{DevicePoint, DeviceRect, DeviceSize};

        // Freshly-created tracker: ages 1..=3 are tracked (empty damage),
        // age 0 and ages beyond the tracked window are unknown.
        let mut tracker = BufferDamageTracker::default();
        assert_eq!(tracker.get_damage_rect(0), None);
        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(2), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(3), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(4), None);

        let damage1 = DeviceRect::from_origin_and_size(DevicePoint::new(10.0, 10.0), DeviceSize::new(10.0, 10.0));
        let damage2 = DeviceRect::from_origin_and_size(DevicePoint::new(20.0, 20.0), DeviceSize::new(10.0, 10.0));
        let combined = damage1.union(&damage2);

        // After one push, older ages see that rect as their damage.
        tracker.push_dirty_rect(&damage1);
        assert_eq!(tracker.get_damage_rect(0), None);
        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(2), Some(damage1));
        assert_eq!(tracker.get_damage_rect(3), Some(damage1));
        assert_eq!(tracker.get_damage_rect(4), None);

        // After a second push, age 3 accumulates the union of both rects.
        tracker.push_dirty_rect(&damage2);
        assert_eq!(tracker.get_damage_rect(0), None);
        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(2), Some(damage2));
        assert_eq!(tracker.get_damage_rect(3), Some(combined));
        assert_eq!(tracker.get_damage_rect(4), None);
    }
}
6079}