// egui_wgpu/winit.rs

#![allow(clippy::missing_errors_doc)]
#![allow(clippy::undocumented_unsafe_blocks)]

use crate::capture::{capture_channel, CaptureReceiver, CaptureSender, CaptureState};
use crate::{renderer, RenderState, SurfaceErrorAction, WgpuConfiguration};
use egui::{Context, Event, UserData, ViewportId, ViewportIdMap, ViewportIdSet};
use std::{num::NonZeroU32, sync::Arc};

struct SurfaceState {
    surface: wgpu::Surface<'static>,
    alpha_mode: wgpu::CompositeAlphaMode,
    width: u32,
    height: u32,
}

/// Everything you need to paint egui with [`wgpu`] on [`winit`].
///
/// Alternatively you can use [`crate::Renderer`] directly.
///
/// NOTE: all egui viewports share the same painter.
pub struct Painter {
    context: Context,
    configuration: WgpuConfiguration,
    msaa_samples: u32,
    support_transparent_backbuffer: bool,
    dithering: bool,
    depth_format: Option<wgpu::TextureFormat>,
    screen_capture_state: Option<CaptureState>,

    instance: wgpu::Instance,
    render_state: Option<RenderState>,

    // Per viewport/window:
    depth_texture_view: ViewportIdMap<wgpu::TextureView>,
    msaa_texture_view: ViewportIdMap<wgpu::TextureView>,
    surfaces: ViewportIdMap<SurfaceState>,
    capture_tx: CaptureSender,
    capture_rx: CaptureReceiver,
}

impl Painter {
    /// Manages [`wgpu`] state, including surface state, required to render egui.
    ///
    /// Only the [`wgpu::Instance`] is initialized here. Device selection and the initialization
    /// of render + surface state are deferred until the painter is given its first window target
    /// via [`set_window()`](Self::set_window), ensuring that a device compatible with the
    /// native window is chosen.
    ///
    /// Before calling [`paint_and_update_textures()`](Self::paint_and_update_textures), a
    /// [`wgpu::Surface`] (and the corresponding render state) must be initialized by calling
    /// [`set_window()`](Self::set_window) once you have
    /// a [`winit::window::Window`] with a valid `.raw_window_handle()`
    /// associated.
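    ///
    /// # Example
    ///
    /// A minimal construction sketch (not from this crate's docs; it assumes default
    /// configuration values and an executor such as `pollster` to block on the future):
    ///
    /// ```ignore
    /// let painter = pollster::block_on(egui_wgpu::winit::Painter::new(
    ///     egui::Context::default(),
    ///     egui_wgpu::WgpuConfiguration::default(),
    ///     1,     // msaa_samples
    ///     None,  // depth_format
    ///     false, // support_transparent_backbuffer
    ///     true,  // dithering
    /// ));
    /// ```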
    pub async fn new(
        context: Context,
        configuration: WgpuConfiguration,
        msaa_samples: u32,
        depth_format: Option<wgpu::TextureFormat>,
        support_transparent_backbuffer: bool,
        dithering: bool,
    ) -> Self {
        let (capture_tx, capture_rx) = capture_channel();
        let instance = configuration.wgpu_setup.new_instance().await;

        Self {
            context,
            configuration,
            msaa_samples,
            support_transparent_backbuffer,
            dithering,
            depth_format,
            screen_capture_state: None,

            instance,
            render_state: None,

            depth_texture_view: Default::default(),
            surfaces: Default::default(),
            msaa_texture_view: Default::default(),

            capture_tx,
            capture_rx,
        }
    }

    /// Get the [`RenderState`].
    ///
    /// Will return [`None`] if the render state has not been initialized yet.
    pub fn render_state(&self) -> Option<RenderState> {
        self.render_state.clone()
    }

    fn configure_surface(
        surface_state: &SurfaceState,
        render_state: &RenderState,
        config: &WgpuConfiguration,
    ) {
        profiling::function_scope!();

        let width = surface_state.width;
        let height = surface_state.height;

        let mut surf_config = wgpu::SurfaceConfiguration {
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            format: render_state.target_format,
            present_mode: config.present_mode,
            alpha_mode: surface_state.alpha_mode,
            view_formats: vec![render_state.target_format],
            ..surface_state
                .surface
                .get_default_config(&render_state.adapter, width, height)
                .expect("The surface isn't supported by this adapter")
        };

        if let Some(desired_maximum_frame_latency) = config.desired_maximum_frame_latency {
            surf_config.desired_maximum_frame_latency = desired_maximum_frame_latency;
        }

        surface_state
            .surface
            .configure(&render_state.device, &surf_config);
    }

    /// Updates (or clears) the [`winit::window::Window`] associated with the [`Painter`].
    ///
    /// This creates a [`wgpu::Surface`] for the given window (as well as initializing render
    /// state if needed) that is used for egui rendering.
    ///
    /// This must be called before trying to render via
    /// [`paint_and_update_textures`](Self::paint_and_update_textures).
    ///
    /// # Portability
    ///
    /// _In particular it's important to note that on Android it's only possible to create
    /// a window surface between `Resumed` and `Paused` lifecycle events, and Winit will panic on
    /// attempts to query the raw window handle while paused._
    ///
    /// On Android [`set_window`](Self::set_window) should be called with `Some(window)` for each
    /// `Resumed` event and `None` for each `Paused` event. Currently, on all other platforms
    /// [`set_window`](Self::set_window) may be called with `Some(window)` as soon as you have a
    /// valid [`winit::window::Window`].
    ///
    /// # Errors
    /// If the provided wgpu configuration does not match an available device.
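    ///
    /// # Example
    ///
    /// A hedged sketch of the intended call pattern; the surrounding event-loop plumbing and
    /// the `painter`/`window` bindings are assumed to exist in the calling code:
    ///
    /// ```ignore
    /// // In winit's `resumed` callback: hand the window to the painter.
    /// pollster::block_on(painter.set_window(egui::ViewportId::ROOT, Some(window.clone())))?;
    ///
    /// // In winit's `suspended` callback (Android `Paused`): drop all surfaces.
    /// pollster::block_on(painter.set_window(egui::ViewportId::ROOT, None))?;
    /// ```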
    pub async fn set_window(
        &mut self,
        viewport_id: ViewportId,
        window: Option<Arc<winit::window::Window>>,
    ) -> Result<(), crate::WgpuError> {
        profiling::scope!("Painter::set_window"); // profile_function gives bad names for async functions

        if let Some(window) = window {
            let size = window.inner_size();
            if !self.surfaces.contains_key(&viewport_id) {
                let surface = self.instance.create_surface(window)?;
                self.add_surface(surface, viewport_id, size).await?;
            }
        } else {
            log::warn!("No window - clearing all surfaces");
            self.surfaces.clear();
        }
        Ok(())
    }

    /// Updates (or clears) the [`winit::window::Window`] associated with the [`Painter`] without taking ownership of the window.
    ///
    /// Like [`set_window`](Self::set_window), except the window is borrowed rather than owned.
    ///
    /// # Safety
    /// The user is responsible for ensuring that the window is alive for as long as it is set.
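    ///
    /// A hedged sketch (assuming `pollster` and a `window: winit::window::Window` that the
    /// caller keeps alive for as long as the surface exists):
    ///
    /// ```ignore
    /// unsafe {
    ///     pollster::block_on(painter.set_window_unsafe(egui::ViewportId::ROOT, Some(&window)))?;
    /// }
    /// ```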
    pub async unsafe fn set_window_unsafe(
        &mut self,
        viewport_id: ViewportId,
        window: Option<&winit::window::Window>,
    ) -> Result<(), crate::WgpuError> {
        profiling::scope!("Painter::set_window_unsafe"); // profile_function gives bad names for async functions

        if let Some(window) = window {
            let size = window.inner_size();
            if !self.surfaces.contains_key(&viewport_id) {
                let surface = unsafe {
                    self.instance
                        .create_surface_unsafe(wgpu::SurfaceTargetUnsafe::from_window(&window)?)?
                };
                self.add_surface(surface, viewport_id, size).await?;
            }
        } else {
            log::warn!("No window - clearing all surfaces");
            self.surfaces.clear();
        }
        Ok(())
    }

    async fn add_surface(
        &mut self,
        surface: wgpu::Surface<'static>,
        viewport_id: ViewportId,
        size: winit::dpi::PhysicalSize<u32>,
    ) -> Result<(), crate::WgpuError> {
        let render_state = if let Some(render_state) = &self.render_state {
            render_state
        } else {
            let render_state = RenderState::create(
                &self.configuration,
                &self.instance,
                Some(&surface),
                self.depth_format,
                self.msaa_samples,
                self.dithering,
            )
            .await?;
            self.render_state.get_or_insert(render_state)
        };
        let alpha_mode = if self.support_transparent_backbuffer {
            let supported_alpha_modes = surface.get_capabilities(&render_state.adapter).alpha_modes;

            // Prefer pre multiplied over post multiplied!
            if supported_alpha_modes.contains(&wgpu::CompositeAlphaMode::PreMultiplied) {
                wgpu::CompositeAlphaMode::PreMultiplied
            } else if supported_alpha_modes.contains(&wgpu::CompositeAlphaMode::PostMultiplied) {
                wgpu::CompositeAlphaMode::PostMultiplied
            } else {
                log::warn!("Transparent window was requested, but the active wgpu surface does not support a `CompositeAlphaMode` with transparency.");
                wgpu::CompositeAlphaMode::Auto
            }
        } else {
            wgpu::CompositeAlphaMode::Auto
        };
        self.surfaces.insert(
            viewport_id,
            SurfaceState {
                surface,
                width: size.width,
                height: size.height,
                alpha_mode,
            },
        );
        let Some(width) = NonZeroU32::new(size.width) else {
            log::debug!("The window width was zero; skipping texture generation");
            return Ok(());
        };
        let Some(height) = NonZeroU32::new(size.height) else {
            log::debug!("The window height was zero; skipping texture generation");
            return Ok(());
        };
        self.resize_and_generate_depth_texture_view_and_msaa_view(viewport_id, width, height);
        Ok(())
    }

    /// Returns the maximum texture dimension supported, if known.
    ///
    /// This API will only return a known dimension after `set_window()` has been called
    /// at least once, since the underlying device and render state are initialized lazily
    /// once we have a window (that may determine the choice of adapter/device).
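    ///
    /// A hedged sketch (`image_width` is a hypothetical value from the calling code):
    ///
    /// ```ignore
    /// if let Some(max_side) = painter.max_texture_side() {
    ///     assert!(image_width <= max_side, "image too large for this GPU");
    /// }
    /// ```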
    pub fn max_texture_side(&self) -> Option<usize> {
        self.render_state
            .as_ref()
            .map(|rs| rs.device.limits().max_texture_dimension_2d as usize)
    }

    fn resize_and_generate_depth_texture_view_and_msaa_view(
        &mut self,
        viewport_id: ViewportId,
        width_in_pixels: NonZeroU32,
        height_in_pixels: NonZeroU32,
    ) {
        profiling::function_scope!();

        let width = width_in_pixels.get();
        let height = height_in_pixels.get();

        let render_state = self.render_state.as_ref().unwrap();
        let surface_state = self.surfaces.get_mut(&viewport_id).unwrap();

        surface_state.width = width;
        surface_state.height = height;

        Self::configure_surface(surface_state, render_state, &self.configuration);

        if let Some(depth_format) = self.depth_format {
            self.depth_texture_view.insert(
                viewport_id,
                render_state
                    .device
                    .create_texture(&wgpu::TextureDescriptor {
                        label: Some("egui_depth_texture"),
                        size: wgpu::Extent3d {
                            width,
                            height,
                            depth_or_array_layers: 1,
                        },
                        mip_level_count: 1,
                        sample_count: self.msaa_samples,
                        dimension: wgpu::TextureDimension::D2,
                        format: depth_format,
                        usage: wgpu::TextureUsages::RENDER_ATTACHMENT
                            | wgpu::TextureUsages::TEXTURE_BINDING,
                        view_formats: &[depth_format],
                    })
                    .create_view(&wgpu::TextureViewDescriptor::default()),
            );
        }

        if let Some(render_state) = (self.msaa_samples > 1)
            .then_some(self.render_state.as_ref())
            .flatten()
        {
            let texture_format = render_state.target_format;
            self.msaa_texture_view.insert(
                viewport_id,
                render_state
                    .device
                    .create_texture(&wgpu::TextureDescriptor {
                        label: Some("egui_msaa_texture"),
                        size: wgpu::Extent3d {
                            width,
                            height,
                            depth_or_array_layers: 1,
                        },
                        mip_level_count: 1,
                        sample_count: self.msaa_samples,
                        dimension: wgpu::TextureDimension::D2,
                        format: texture_format,
                        usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
                        view_formats: &[texture_format],
                    })
                    .create_view(&wgpu::TextureViewDescriptor::default()),
            );
        };
    }

    /// Call this when the window has been resized, so that the surface is reconfigured
    /// and the depth/MSAA textures for the given viewport are regenerated at the new size.
    pub fn on_window_resized(
        &mut self,
        viewport_id: ViewportId,
        width_in_pixels: NonZeroU32,
        height_in_pixels: NonZeroU32,
    ) {
        profiling::function_scope!();

        if self.surfaces.contains_key(&viewport_id) {
            self.resize_and_generate_depth_texture_view_and_msaa_view(
                viewport_id,
                width_in_pixels,
                height_in_pixels,
            );
        } else {
            log::warn!("Ignoring window resize notification with no surface created via Painter::set_window()");
        }
    }

    /// Returns the approximate number of seconds spent on vsync-waiting (if any).
    ///
    /// If `capture_data` isn't empty, a screenshot will be captured; it is delivered
    /// later via the channel drained by [`Self::handle_screenshots`].
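    ///
    /// # Example
    ///
    /// A hedged usage sketch; `ctx`, `full_output` (from `egui::Context::run`) and
    /// `pixels_per_point` are assumed to exist in the calling code:
    ///
    /// ```ignore
    /// let clipped_primitives = ctx.tessellate(full_output.shapes, pixels_per_point);
    /// let vsync_secs = painter.paint_and_update_textures(
    ///     egui::ViewportId::ROOT,
    ///     pixels_per_point,
    ///     [0.0, 0.0, 0.0, 1.0], // opaque black clear color
    ///     &clipped_primitives,
    ///     &full_output.textures_delta,
    ///     Vec::new(), // no screenshot requested
    /// );
    /// ```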
    pub fn paint_and_update_textures(
        &mut self,
        viewport_id: ViewportId,
        pixels_per_point: f32,
        clear_color: [f32; 4],
        clipped_primitives: &[epaint::ClippedPrimitive],
        textures_delta: &epaint::textures::TexturesDelta,
        capture_data: Vec<UserData>,
    ) -> f32 {
        profiling::function_scope!();

        let capture = !capture_data.is_empty();
        let mut vsync_sec = 0.0;

        let Some(render_state) = self.render_state.as_mut() else {
            return vsync_sec;
        };
        let Some(surface_state) = self.surfaces.get(&viewport_id) else {
            return vsync_sec;
        };

        let mut encoder =
            render_state
                .device
                .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                    label: Some("encoder"),
                });

        // Upload all resources for the GPU.
        let screen_descriptor = renderer::ScreenDescriptor {
            size_in_pixels: [surface_state.width, surface_state.height],
            pixels_per_point,
        };

        let user_cmd_bufs = {
            let mut renderer = render_state.renderer.write();
            for (id, image_delta) in &textures_delta.set {
                renderer.update_texture(
                    &render_state.device,
                    &render_state.queue,
                    *id,
                    image_delta,
                );
            }

            renderer.update_buffers(
                &render_state.device,
                &render_state.queue,
                &mut encoder,
                clipped_primitives,
                &screen_descriptor,
            )
        };

        let output_frame = {
            profiling::scope!("get_current_texture");
            // This is where vsync-waiting happens on my Mac.
            let start = web_time::Instant::now();
            let output_frame = surface_state.surface.get_current_texture();
            vsync_sec += start.elapsed().as_secs_f32();
            output_frame
        };

        let output_frame = match output_frame {
            Ok(frame) => frame,
            Err(err) => match (*self.configuration.on_surface_error)(err) {
                SurfaceErrorAction::RecreateSurface => {
                    Self::configure_surface(surface_state, render_state, &self.configuration);
                    return vsync_sec;
                }
                SurfaceErrorAction::SkipFrame => {
                    return vsync_sec;
                }
            },
        };

        let mut capture_buffer = None;
        {
            let renderer = render_state.renderer.read();

            let target_texture = if capture {
                let capture_state = self.screen_capture_state.get_or_insert_with(|| {
                    CaptureState::new(&render_state.device, &output_frame.texture)
                });
                capture_state.update(&render_state.device, &output_frame.texture);

                &capture_state.texture
            } else {
                &output_frame.texture
            };
            let target_view = target_texture.create_view(&wgpu::TextureViewDescriptor::default());

            let (view, resolve_target) = (self.msaa_samples > 1)
                .then_some(self.msaa_texture_view.get(&viewport_id))
                .flatten()
                .map_or((&target_view, None), |texture_view| {
                    (texture_view, Some(&target_view))
                });

            let render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("egui_render"),
                color_attachments: &[Some(wgpu::RenderPassColorAttachment {
                    view,
                    resolve_target,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color {
                            r: clear_color[0] as f64,
                            g: clear_color[1] as f64,
                            b: clear_color[2] as f64,
                            a: clear_color[3] as f64,
                        }),
                        store: wgpu::StoreOp::Store,
                    },
                })],
                depth_stencil_attachment: self.depth_texture_view.get(&viewport_id).map(|view| {
                    wgpu::RenderPassDepthStencilAttachment {
                        view,
                        depth_ops: Some(wgpu::Operations {
                            load: wgpu::LoadOp::Clear(1.0),
                            // It is very unlikely that the depth buffer is needed after egui has finished rendering,
                            // so there is no need to store it. (This can improve performance on tiling GPUs, such as mobile chips or Apple Silicon.)
                            store: wgpu::StoreOp::Discard,
                        }),
                        stencil_ops: None,
                    }
                }),
                timestamp_writes: None,
                occlusion_query_set: None,
            });

            // Forgetting the pass' lifetime means that we are no longer compile-time protected from
            // runtime errors caused by accessing the parent encoder before the render pass is dropped.
            // Since we don't pass it on to the renderer, we should be perfectly safe against this mistake here!
            renderer.render(
                &mut render_pass.forget_lifetime(),
                clipped_primitives,
                &screen_descriptor,
            );

            if capture {
                if let Some(capture_state) = &mut self.screen_capture_state {
                    capture_buffer = Some(capture_state.copy_textures(
                        &render_state.device,
                        &output_frame,
                        &mut encoder,
                    ));
                }
            }
        }

        let encoded = {
            profiling::scope!("CommandEncoder::finish");
            encoder.finish()
        };

        // Submit the commands: both the main buffer and user-defined ones.
        {
            profiling::scope!("Queue::submit");
            // wgpu doesn't document where vsync can happen. Maybe here?
            let start = web_time::Instant::now();
            render_state
                .queue
                .submit(user_cmd_bufs.into_iter().chain([encoded]));
            vsync_sec += start.elapsed().as_secs_f32();
        };

        // Free textures marked for destruction **after** queue submit since they might still be used in the current frame.
        // Calling `wgpu::Texture::destroy` on a texture that is still in use would invalidate the command buffer(s) it is used in.
        // However, once we have called `wgpu::Queue::submit`, it is up to wgpu to determine how long the underlying gpu resource has to live.
        {
            let mut renderer = render_state.renderer.write();
            for id in &textures_delta.free {
                renderer.free_texture(id);
            }
        }

        if let Some(capture_buffer) = capture_buffer {
            if let Some(screen_capture_state) = &mut self.screen_capture_state {
                screen_capture_state.read_screen_rgba(
                    self.context.clone(),
                    capture_buffer,
                    capture_data,
                    self.capture_tx.clone(),
                    viewport_id,
                );
            }
        }

        {
            profiling::scope!("present");
            // wgpu doesn't document where vsync can happen. Maybe here?
            let start = web_time::Instant::now();
            output_frame.present();
            vsync_sec += start.elapsed().as_secs_f32();
        }

        vsync_sec
    }

    /// Call this at the beginning of each frame to receive the requested screenshots.
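    ///
    /// A hedged sketch of feeding the received screenshots into the next frame's input;
    /// `raw_input` is assumed to be the `egui::RawInput` you are about to pass to the context:
    ///
    /// ```ignore
    /// let mut events = Vec::new();
    /// painter.handle_screenshots(&mut events);
    /// raw_input.events.extend(events);
    /// ```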
    pub fn handle_screenshots(&self, events: &mut Vec<Event>) {
        for (viewport_id, user_data, screenshot) in self.capture_rx.try_iter() {
            let screenshot = Arc::new(screenshot);
            for data in user_data {
                events.push(Event::Screenshot {
                    viewport_id,
                    user_data: data,
                    image: screenshot.clone(),
                });
            }
        }
    }

    /// Drop the per-viewport resources (surface, depth and MSAA texture views) of any
    /// viewport that is no longer in `active_viewports`.
    pub fn gc_viewports(&mut self, active_viewports: &ViewportIdSet) {
        self.surfaces.retain(|id, _| active_viewports.contains(id));
        self.depth_texture_view
            .retain(|id, _| active_viewports.contains(id));
        self.msaa_texture_view
            .retain(|id, _| active_viewports.contains(id));
    }

    #[allow(clippy::needless_pass_by_ref_mut, clippy::unused_self)]
    pub fn destroy(&mut self) {
        // TODO(emilk): something here?
    }
}