rendy_factory/
factory.rs

use {
    crate::{
        blitter::Blitter,
        command::{
            families_from_device, CommandPool, Families, Family, FamilyId, Fence, QueueType, Reset,
        },
        config::{Config, DevicesConfigure, HeapsConfigure, QueuesConfigure},
        core::{rendy_with_slow_safety_checks, Device, DeviceId, Instance, InstanceId},
        descriptor::DescriptorAllocator,
        memory::{self, Heaps, MemoryUsage, TotalMemoryUtilization, Write},
        resource::*,
        upload::{BufferState, ImageState, ImageStateOrLayout, Uploader},
        wsi::{Surface, SwapchainError, Target},
    },
    rendy_core::{
        hal::{
            adapter::{Adapter, Gpu, PhysicalDevice},
            buffer,
            device::{
                AllocationError, CreationError, Device as _, MapError, OomOrDeviceLost,
                OutOfMemory, WaitFor,
            },
            format, image,
            pso::DescriptorSetLayoutBinding,
            window::{Extent2D, InitError, Surface as GfxSurface},
            Backend, Features, Instance as _, Limits,
        },
        HasRawWindowHandle,
    },
    smallvec::SmallVec,
    std::{borrow::BorrowMut, cmp::max, mem::ManuallyDrop},
    thread_profiler::profile_scope,
};

#[derive(Debug)]
struct ResourceHub<B: Backend> {
    buffers: ResourceTracker<Buffer<B>>,
    images: ResourceTracker<Image<B>>,
    views: ResourceTracker<ImageView<B>>,
    layouts: ResourceTracker<DescriptorSetLayout<B>>,
    sets: ResourceTracker<DescriptorSet<B>>,
    samplers: ResourceTracker<Sampler<B>>,
    samplers_cache: parking_lot::RwLock<SamplerCache<B>>,
}

impl<B> Default for ResourceHub<B>
where
    B: Backend,
{
    fn default() -> Self {
        ResourceHub {
            buffers: ResourceTracker::default(),
            images: ResourceTracker::default(),
            views: ResourceTracker::default(),
            layouts: ResourceTracker::default(),
            sets: ResourceTracker::default(),
            samplers: ResourceTracker::default(),
            samplers_cache: parking_lot::RwLock::new(SamplerCache::default()),
        }
    }
}

impl<B> ResourceHub<B>
where
    B: Backend,
{
    unsafe fn cleanup(
        &mut self,
        device: &Device<B>,
        heaps: &mut Heaps<B>,
        allocator: &mut DescriptorAllocator<B>,
        next: Epochs,
        complete: Epochs,
    ) {
        self.sets
            .cleanup(|s| s.dispose(allocator), &next, &complete);
        self.views.cleanup(|v| v.dispose(device), &next, &complete);
        self.layouts
            .cleanup(|l| l.dispose(device), &next, &complete);
        self.buffers
            .cleanup(|b| b.dispose(device, heaps), &next, &complete);
        self.images
            .cleanup(|i| i.dispose(device, heaps), &next, &complete);
        self.samplers
            .cleanup(|i| i.dispose(device), &next, &complete);
    }

    unsafe fn dispose(
        mut self,
        device: &Device<B>,
        heaps: &mut Heaps<B>,
        allocator: &mut DescriptorAllocator<B>,
    ) {
        drop(self.samplers_cache);
        self.sets.dispose(|s| s.dispose(allocator));
        self.views.dispose(|v| v.dispose(device));
        self.layouts.dispose(|l| l.dispose(device));
        self.buffers.dispose(|b| b.dispose(device, heaps));
        self.images.dispose(|i| i.dispose(device, heaps));
        self.samplers.dispose(|i| i.dispose(device));
    }
}

/// Failure uploading a buffer or an image.
#[derive(Clone, Debug, PartialEq)]
pub enum UploadError {
    /// Failed to create the staging buffer.
    Create(BufferCreationError),
    /// Failed to map the staging buffer.
    Map(MapError),
    /// Failed to upload the data.
    Upload(OutOfMemory),
}

impl std::fmt::Display for UploadError {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            UploadError::Create(err) => write!(fmt, "Upload failed: {:?}", err),
            UploadError::Map(err) => write!(fmt, "Upload failed: {:?}", err),
            UploadError::Upload(err) => write!(fmt, "Upload failed: {:?}", err),
        }
    }
}

impl std::error::Error for UploadError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        match self {
            UploadError::Create(err) => Some(err),
            UploadError::Map(err) => Some(err),
            UploadError::Upload(err) => Some(err),
        }
    }
}

#[derive(Debug)]
enum InstanceOrId<B: Backend> {
    Instance(Instance<B>),
    Id(InstanceId),
}

impl<B> InstanceOrId<B>
where
    B: Backend,
{
    fn id(&self) -> InstanceId {
        match self {
            InstanceOrId::Instance(instance) => instance.id(),
            InstanceOrId::Id(id) => *id,
        }
    }

    fn as_instance(&self) -> Option<&Instance<B>> {
        match self {
            InstanceOrId::Instance(instance) => Some(instance),
            InstanceOrId::Id(_) => None,
        }
    }
}

/// Higher level device interface.
/// Manages memory, resources and queue families.
#[derive(Debug)]
pub struct Factory<B: Backend> {
    descriptor_allocator: ManuallyDrop<parking_lot::Mutex<DescriptorAllocator<B>>>,
    heaps: ManuallyDrop<parking_lot::Mutex<Heaps<B>>>,
    resources: ManuallyDrop<ResourceHub<B>>,
    epochs: Vec<parking_lot::RwLock<Vec<u64>>>,
    uploader: Uploader<B>,
    blitter: Blitter<B>,
    families_indices: Vec<usize>,
    device: Device<B>,
    adapter: Adapter<B>,
    instance: InstanceOrId<B>,
}

#[allow(unused)]
fn factory_is_send_sync<B: Backend>() {
    fn is_send_sync<T: Send + Sync>() {}
    is_send_sync::<Factory<B>>();
}

impl<B> Drop for Factory<B>
where
    B: Backend,
{
    fn drop(&mut self) {
        log::debug!("Dropping factory");
        self.wait_idle().unwrap();

        unsafe {
            // Device is idle.
            self.uploader.dispose(&self.device);
            log::trace!("Uploader disposed");
            self.blitter.dispose(&self.device);
            log::trace!("Blitter disposed");
            std::ptr::read(&mut *self.resources).dispose(
                &self.device,
                self.heaps.get_mut(),
                self.descriptor_allocator.get_mut(),
            );

            log::trace!("Resources disposed");
        }

        unsafe {
            std::ptr::read(&mut *self.heaps)
                .into_inner()
                .dispose(&self.device);
            log::trace!("Heaps disposed");
        }

        unsafe {
            std::ptr::read(&mut *self.descriptor_allocator)
                .into_inner()
                .dispose(&self.device);
            log::trace!("Descriptor allocator disposed");
        }

        log::trace!("Factory dropped");
    }
}

impl<B> Factory<B>
where
    B: Backend,
{
    /// Wait for the whole device to become idle.
    /// This function is very heavy and
    /// is usually used only for teardown.
    pub fn wait_idle(&self) -> Result<(), OutOfMemory> {
        profile_scope!("wait_idle");

        log::debug!("Wait device idle");
        self.device.wait_idle()?;
        log::trace!("Device idle");
        Ok(())
    }

    /// Creates a buffer with the specified properties.
    ///
    /// This function returns a relevant value, that is, a value that cannot be dropped.
    /// However, the buffer can be destroyed using the [`destroy_relevant_buffer`] function.
    ///
    /// [`destroy_relevant_buffer`]: #method.destroy_relevant_buffer
    pub fn create_relevant_buffer(
        &self,
        info: BufferInfo,
        memory_usage: impl MemoryUsage,
    ) -> Result<Buffer<B>, BufferCreationError> {
        profile_scope!("create_relevant_buffer");

        unsafe { Buffer::create(&self.device, &mut self.heaps.lock(), info, memory_usage) }
    }

    /// Destroy a buffer.
    /// If the buffer was created using [`create_buffer`] it must be unescaped first.
    /// If the buffer was shared, unescaping may fail because other owners still exist.
    /// In any case, unescaping and destroying manually can slightly increase performance.
    ///
    /// # Safety
    ///
    /// Buffer must not be used by any pending commands or referenced anywhere.
    ///
    /// [`create_buffer`]: #method.create_buffer
    pub unsafe fn destroy_relevant_buffer(&self, buffer: Buffer<B>) {
        buffer.dispose(&self.device, &mut self.heaps.lock());
    }

    /// Creates a buffer with the specified properties.
    ///
    /// This function (unlike [`create_relevant_buffer`]) returns a value that can be dropped.
    ///
    /// [`create_relevant_buffer`]: #method.create_relevant_buffer
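    ///
    /// A minimal usage sketch (the `factory` value is an assumption for illustration; paths are
    /// written as they appear inside this module):
    ///
    /// ```ignore
    /// // 1 KiB buffer in host-visible upload memory, usable as a transfer source.
    /// let staging = factory.create_buffer(
    ///     BufferInfo {
    ///         size: 1024,
    ///         usage: buffer::Usage::TRANSFER_SRC,
    ///     },
    ///     memory::Upload,
    /// )?;
    /// // `staging` is an `Escape<Buffer<B>>`, so it may simply be dropped when no longer needed.
    /// ```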
    pub fn create_buffer(
        &self,
        info: BufferInfo,
        memory_usage: impl MemoryUsage,
    ) -> Result<Escape<Buffer<B>>, BufferCreationError> {
        let buffer = self.create_relevant_buffer(info, memory_usage)?;
        Ok(self.resources.buffers.escape(buffer))
    }

    /// Creates an image with the specified properties.
    ///
    /// This function returns a relevant value, that is, a value that cannot be dropped.
    /// However, the image can be destroyed using the [`destroy_relevant_image`] function.
    ///
    /// [`destroy_relevant_image`]: #method.destroy_relevant_image
    pub fn create_relevant_image(
        &self,
        info: ImageInfo,
        memory_usage: impl MemoryUsage,
    ) -> Result<Image<B>, ImageCreationError> {
        profile_scope!("create_relevant_image");

        unsafe { Image::create(&self.device, &mut self.heaps.lock(), info, memory_usage) }
    }

    /// Destroy an image.
    /// If the image was created using [`create_image`] it must be unescaped first.
    /// If the image was shared, unescaping may fail because other owners still exist.
    /// In any case, unescaping and destroying manually can slightly increase performance.
    ///
    /// # Safety
    ///
    /// Image must not be used by any pending commands or referenced anywhere.
    ///
    /// [`create_image`]: #method.create_image
    pub unsafe fn destroy_relevant_image(&self, image: Image<B>) {
        image.dispose(&self.device, &mut self.heaps.lock());
    }

    /// Creates an image with the specified properties.
    ///
    /// This function (unlike [`create_relevant_image`]) returns a value that can be dropped.
    ///
    /// [`create_relevant_image`]: #method.create_relevant_image
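    ///
    /// A minimal usage sketch (the `factory` value and the `memory::Data` usage type are
    /// assumptions; the `ImageInfo` fields shown are the ones this module itself reads):
    ///
    /// ```ignore
    /// // 1024x1024 RGBA8 texture usable as a transfer destination and a sampled image.
    /// let image = factory.create_image(
    ///     ImageInfo {
    ///         kind: Kind::D2(1024, 1024, 1, 1),
    ///         levels: 1,
    ///         format: format::Format::Rgba8Unorm,
    ///         tiling: image::Tiling::Optimal,
    ///         view_caps: image::ViewCapabilities::empty(),
    ///         usage: image::Usage::TRANSFER_DST | image::Usage::SAMPLED,
    ///     },
    ///     memory::Data, // device-local memory usage (assumed name)
    /// )?;
    /// ```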
    pub fn create_image(
        &self,
        info: ImageInfo,
        memory_usage: impl MemoryUsage,
    ) -> Result<Escape<Image<B>>, ImageCreationError> {
        let image = self.create_relevant_image(info, memory_usage)?;
        Ok(self.resources.images.escape(image))
    }

    /// Fetch image format details for a particular `ImageInfo`.
    pub fn image_format_properties(&self, info: ImageInfo) -> Option<FormatProperties> {
        self.physical().image_format_properties(
            info.format,
            match info.kind {
                Kind::D1(_, _) => 1,
                Kind::D2(_, _, _, _) => 2,
                Kind::D3(_, _, _) => 3,
            },
            info.tiling,
            info.usage,
            info.view_caps,
        )
    }

    /// Create an image view with the specified properties.
    ///
    /// This function returns a relevant value, that is, a value that cannot be dropped.
    /// However, the image view can be destroyed using the [`destroy_relevant_image_view`] function.
    ///
    /// [`destroy_relevant_image_view`]: #method.destroy_relevant_image_view
    pub fn create_relevant_image_view(
        &self,
        image: Handle<Image<B>>,
        info: ImageViewInfo,
    ) -> Result<ImageView<B>, ImageViewCreationError> {
        ImageView::create(&self.device, info, image)
    }

    /// Destroy an image view.
    /// If the image view was created using [`create_image_view`] it must be unescaped first.
    /// If the image view was shared, unescaping may fail because other owners still exist.
    /// In any case, unescaping and destroying manually can slightly increase performance.
    ///
    /// # Safety
    ///
    /// Image view must not be used by any pending commands or referenced anywhere.
    ///
    /// [`create_image_view`]: #method.create_image_view
    pub unsafe fn destroy_relevant_image_view(&self, view: ImageView<B>) {
        view.dispose(&self.device);
    }

    /// Create an image view with the specified properties.
    ///
    /// This function (unlike [`create_relevant_image_view`]) returns a value that can be dropped.
    ///
    /// [`create_relevant_image_view`]: #method.create_relevant_image_view
    pub fn create_image_view(
        &self,
        image: Handle<Image<B>>,
        info: ImageViewInfo,
    ) -> Result<Escape<ImageView<B>>, ImageViewCreationError> {
        let view = self.create_relevant_image_view(image, info)?;
        Ok(self.resources.views.escape(view))
    }

    /// Create a sampler with the specified properties.
    ///
    /// This function returns a relevant value, that is, a value that cannot be dropped.
    /// However, the sampler can be destroyed using the [`destroy_relevant_sampler`] function.
    ///
    /// [`destroy_relevant_sampler`]: #method.destroy_relevant_sampler
    pub fn create_relevant_sampler(
        &self,
        info: SamplerDesc,
    ) -> Result<Sampler<B>, AllocationError> {
        Sampler::create(&self.device, info)
    }

    /// Destroy a sampler.
    /// If the sampler was created using [`create_sampler`] it must be unescaped first.
    /// If the sampler was shared, unescaping may fail because other owners still exist.
    /// In any case, unescaping and destroying manually can slightly increase performance.
    /// If the sampler was acquired using [`get_sampler`], unescaping will most probably fail
    /// because the factory holds a copy of the handle in its cache.
    ///
    /// # Safety
    ///
    /// Sampler must not be used by any pending commands or referenced anywhere.
    ///
    /// [`create_sampler`]: #method.create_sampler
    /// [`get_sampler`]: #method.get_sampler
    pub unsafe fn destroy_relevant_sampler(&self, sampler: Sampler<B>) {
        sampler.dispose(&self.device);
    }

    /// Creates a sampler with the specified properties.
    ///
    /// This function (unlike [`create_relevant_sampler`]) returns a value that can be dropped.
    ///
    /// [`create_relevant_sampler`]: #method.create_relevant_sampler
    pub fn create_sampler(&self, info: SamplerDesc) -> Result<Escape<Sampler<B>>, AllocationError> {
        let sampler = self.create_relevant_sampler(info)?;
        Ok(self.resources.samplers.escape(sampler))
    }

    /// Get a cached sampler or create a new one.
    /// Users should prefer this function to [`create_sampler`] and [`create_relevant_sampler`]
    /// because usually only a few sampler configurations are required.
    ///
    /// [`create_sampler`]: #method.create_sampler
    /// [`create_relevant_sampler`]: #method.create_relevant_sampler
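    ///
    /// A minimal usage sketch (the `factory` value is an assumption; `SamplerDesc::new` is the
    /// constructor of the underlying gfx-hal sampler descriptor):
    ///
    /// ```ignore
    /// // Repeated calls with an equal `SamplerDesc` return handles to the same cached sampler.
    /// let sampler = factory.get_sampler(SamplerDesc::new(
    ///     image::Filter::Linear,
    ///     image::WrapMode::Clamp,
    /// ))?;
    /// ```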
    pub fn get_sampler(&self, info: SamplerDesc) -> Result<Handle<Sampler<B>>, AllocationError> {
        let samplers = &self.resources.samplers;
        let device = &self.device;

        SamplerCache::get_with_upgradable_lock(
            self.resources.samplers_cache.upgradable_read(),
            parking_lot::RwLockUpgradableReadGuard::upgrade,
            info.clone(),
            || Ok(samplers.handle(Sampler::create(device, info)?)),
        )
    }

    /// Update the content of a buffer bound to host-visible memory.
    /// This function (unlike [`upload_buffer`]) updates the content immediately.
    ///
    /// Buffers allocated from host-invisible memory types cannot be
    /// updated via this function.
    ///
    /// Updated content will be automatically made visible to device operations
    /// that will be submitted later.
    ///
    /// # Panics
    ///
    /// Panics if buffer size is less than `offset` + size of `content`.
    ///
    /// # Safety
    ///
    /// Caller must ensure that the device doesn't use the memory region that is being updated.
    ///
    /// [`upload_buffer`]: #method.upload_buffer
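    ///
    /// A minimal usage sketch (the `factory` and `buffer` values are assumptions; the buffer is
    /// presumed to live in host-visible memory and to be idle on the device):
    ///
    /// ```ignore
    /// let vertices: [[f32; 3]; 3] = [[0.0, 0.5, 0.0], [-0.5, -0.5, 0.0], [0.5, -0.5, 0.0]];
    /// unsafe {
    ///     // Writes the raw bytes of `vertices` at offset 0 of the buffer.
    ///     factory.upload_visible_buffer(&mut buffer, 0, &vertices)?;
    /// }
    /// ```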
    pub unsafe fn upload_visible_buffer<T>(
        &self,
        buffer: &mut Buffer<B>,
        offset: u64,
        content: &[T],
    ) -> Result<(), MapError>
    where
        T: 'static + Copy,
    {
        let content = std::slice::from_raw_parts(
            content.as_ptr() as *const u8,
            content.len() * std::mem::size_of::<T>(),
        );

        let mut mapped = buffer.map(&self.device, offset..offset + content.len() as u64)?;
        mapped
            .write(&self.device, 0..content.len() as u64)?
            .write(content);
        Ok(())
    }

    /// Update a buffer range with the provided data.
    ///
    /// The update operation will actually be submitted to the graphics device queue
    /// upon the next [`flush_uploads`] or [`maintain`] call to this `Factory`, and
    /// is guaranteed to take place after all previous operations that have been
    /// submitted to the same graphics queue on this `Factory` since the last
    /// [`flush_uploads`] or [`maintain`] call.
    ///
    /// Note that the buffer range will receive `content` as raw bytes,
    /// and the interpretation will depend solely on the device operation.
    /// A slice of a generic type is allowed for convenience.
    /// It usually should be a POD struct of numeric values or other POD structs.
    ///
    /// `#[repr(C)]` can be used to guarantee a defined memory layout of struct fields.
    ///
    /// # Safety
    ///
    /// If the buffer is used by the device then the `last` state must match the last usage state
    /// of the buffer before the update happens.
    /// To guarantee that the updated content is made visible to the next device operation
    /// that reads the buffer range, `next` must match the buffer usage state in that operation.
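    ///
    /// A minimal usage sketch (the `factory`, `buffer` and `next_state` values are assumptions;
    /// `next_state` stands in for a `BufferState` describing how the device will next use the range):
    ///
    /// ```ignore
    /// let data: [u32; 4] = [0, 1, 2, 3];
    /// unsafe {
    ///     // `last: None` means the buffer has not been used by the device yet.
    ///     factory.upload_buffer(&buffer, 0, &data, None, next_state)?;
    /// }
    /// // The copy is recorded and submitted on the next `flush_uploads`/`maintain` call.
    /// ```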
    pub unsafe fn upload_buffer<T>(
        &self,
        buffer: &Buffer<B>,
        offset: u64,
        content: &[T],
        last: Option<BufferState>,
        next: BufferState,
    ) -> Result<(), UploadError>
    where
        T: 'static + Copy,
    {
        assert!(buffer.info().usage.contains(buffer::Usage::TRANSFER_DST));

        let content_size = content.len() as u64 * std::mem::size_of::<T>() as u64;
        let mut staging = self
            .create_buffer(
                BufferInfo {
                    size: content_size,
                    usage: buffer::Usage::TRANSFER_SRC,
                },
                memory::Upload,
            )
            .map_err(UploadError::Create)?;

        self.upload_visible_buffer(&mut staging, 0, content)
            .map_err(UploadError::Map)?;

        self.uploader
            .upload_buffer(&self.device, buffer, offset, staging, last, next)
            .map_err(UploadError::Upload)
    }

    /// Update buffer content with the provided staging buffer.
    ///
    /// The update operation will actually be submitted to the graphics device queue
    /// upon the next [`flush_uploads`] or [`maintain`] call to this `Factory`, and
    /// is guaranteed to take place after all previous operations that have been
    /// submitted to the same graphics queue on this `Factory` since the last
    /// [`flush_uploads`] or [`maintain`] call.
    ///
    /// # Safety
    ///
    /// If the buffer is used by the device then the `last` state must match the last usage state
    /// of the buffer before the update happens.
    /// To guarantee that the updated content is made visible to the next device operation
    /// that reads the buffer range, `next` must match the buffer usage state in that operation.
    pub unsafe fn upload_from_staging_buffer(
        &self,
        buffer: &Buffer<B>,
        offset: u64,
        staging: Escape<Buffer<B>>,
        last: Option<BufferState>,
        next: BufferState,
    ) -> Result<(), OutOfMemory> {
        assert!(buffer.info().usage.contains(buffer::Usage::TRANSFER_DST));
        assert!(staging.info().usage.contains(buffer::Usage::TRANSFER_SRC));
        self.uploader
            .upload_buffer(&self.device, buffer, offset, staging, last, next)
    }

    /// Transition part of an image from one state to another.
    ///
    /// The transition operation will actually be submitted to the graphics device queue
    /// upon the next [`flush_uploads`] or [`maintain`] call to this `Factory`, and
    /// is guaranteed to take place after all previous operations that have been
    /// submitted to the same graphics queue on this `Factory` since the last
    /// [`flush_uploads`] or [`maintain`] call.
    ///
    /// # Safety
    ///
    /// The image must have been created by this `Factory`.
    /// If the image is used by the device then the `last` state must match the last usage state
    /// of the image before the transition.
    pub unsafe fn transition_image(
        &self,
        image: Handle<Image<B>>,
        image_range: SubresourceRange,
        last: impl Into<ImageStateOrLayout>,
        next: ImageState,
    ) {
        self.uploader
            .transition_image(image, image_range, last.into(), next);
    }

    /// Update image layers content with the provided data.
    ///
    /// The update operation will actually be submitted to the graphics device queue
    /// upon the next [`flush_uploads`] or [`maintain`] call to this `Factory`, and
    /// is guaranteed to take place after all previous operations that have been
    /// submitted to the same graphics queue on this `Factory` since the last
    /// [`flush_uploads`] or [`maintain`] call.
    ///
    /// Note that the image layers will receive `content` as raw bytes,
    /// and the interpretation will depend solely on the device operation.
    /// A slice of a generic type is allowed for convenience.
    /// It usually should be a type compatible with the pixel or channel layout.
    /// For example `&[[u8; 4]]` or `&[u8]` for the `Rgba8Unorm` format.
    ///
    /// # Safety
    ///
    /// The image must have been created by this `Factory`.
    /// If the image is used by the device then the `last` state must match the last usage state
    /// of the image before the update happens.
    /// To guarantee that the updated content is made visible to the next device operation
    /// that reads the image layers, `next` must match the image usage state in that operation.
    pub unsafe fn upload_image<T>(
        &self,
        image: Handle<Image<B>>,
        data_width: u32,
        data_height: u32,
        image_layers: SubresourceLayers,
        image_offset: image::Offset,
        image_extent: Extent,
        content: &[T],
        last: impl Into<ImageStateOrLayout>,
        next: ImageState,
    ) -> Result<(), UploadError>
    where
        T: 'static + Copy,
    {
        assert!(image.info().usage.contains(image::Usage::TRANSFER_DST));
        assert_eq!(image.format().surface_desc().aspects, image_layers.aspects);
        assert!(image_layers.layers.start <= image_layers.layers.end);
        assert!(image_layers.layers.end <= image.kind().num_layers());
        assert!(image_layers.level <= image.info().levels);

        let content_size = content.len() as u64 * std::mem::size_of::<T>() as u64;
        let format_desc = image.format().surface_desc();
        let texels_count = (image_extent.width / format_desc.dim.0 as u32) as u64
            * (image_extent.height / format_desc.dim.1 as u32) as u64
            * image_extent.depth as u64
            * (image_layers.layers.end - image_layers.layers.start) as u64;
        let total_bytes = (format_desc.bits as u64 / 8) * texels_count;
        assert_eq!(
            total_bytes, content_size,
            "Size of content must match size of the image region"
        );

        let mut staging = self
            .create_buffer(
                BufferInfo {
                    size: content_size,
                    usage: buffer::Usage::TRANSFER_SRC,
                },
                memory::Upload,
            )
            .map_err(UploadError::Create)?;

        self.upload_visible_buffer(&mut staging, 0, content)
            .map_err(UploadError::Map)?;

        self.uploader
            .upload_image(
                &self.device,
                image,
                data_width,
                data_height,
                image_layers,
                image_offset,
                image_extent,
                staging,
                last.into(),
                next,
            )
            .map_err(UploadError::Upload)
    }

    /// Get the blitter instance.
    pub fn blitter(&self) -> &Blitter<B> {
        &self.blitter
    }

    /// Create a rendering surface from a window handle.
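    ///
    /// A minimal usage sketch (the `factory` value and the winit-style `window` are assumptions;
    /// any handle type implementing `HasRawWindowHandle` works):
    ///
    /// ```ignore
    /// // `window` could be, for example, a `winit::window::Window`.
    /// let surface = factory.create_surface(&window)?;
    /// ```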
    pub fn create_surface(
        &mut self,
        handle: &impl HasRawWindowHandle,
    ) -> Result<Surface<B>, InitError> {
        profile_scope!("create_surface");
        Surface::new(
            self.instance
                .as_instance()
                .expect("Cannot create surface without instance"),
            handle,
        )
    }

    /// Create a rendering surface from a window.
    ///
    /// # Safety
    ///
    /// The closure must return a surface object created from the raw instance provided as the
    /// closure argument.
    pub unsafe fn create_surface_with(
        &mut self,
        f: impl FnOnce(&B::Instance) -> B::Surface,
    ) -> Surface<B> {
        profile_scope!("create_surface");
        Surface::new_with(
            self.instance
                .as_instance()
                .expect("Cannot create surface without instance"),
            f,
        )
    }

    /// Get formats supported by the Surface.
    ///
    /// # Panics
    ///
    /// Panics if `surface` was not created by this `Factory`.
    pub fn get_surface_formats(
        &self,
        surface: &Surface<B>,
    ) -> Option<Vec<rendy_core::hal::format::Format>> {
        profile_scope!("get_surface_formats");

        assert_eq!(
            surface.instance_id(),
            self.instance.id(),
            "Resource is not owned by specified instance"
        );
        unsafe { surface.supported_formats(&self.adapter.physical_device) }
    }

    /// Get capabilities of the Surface.
    ///
    /// # Panics
    ///
    /// Panics if `surface` was not created by this `Factory`.
    pub fn get_surface_capabilities(
        &self,
        surface: &Surface<B>,
    ) -> rendy_core::hal::window::SurfaceCapabilities {
        profile_scope!("get_surface_capabilities");

        assert_eq!(
            surface.instance_id(),
            self.instance.id(),
            "Resource is not owned by specified instance"
        );
        unsafe { surface.capabilities(&self.adapter.physical_device) }
    }

    /// Get the surface format.
    ///
    /// # Panics
    ///
    /// Panics if `surface` was not created by this `Factory`.
    pub fn get_surface_format(&self, surface: &Surface<B>) -> format::Format {
        profile_scope!("get_surface_format");

        assert_eq!(
            surface.instance_id(),
            self.instance.id(),
            "Resource is not owned by specified instance"
        );
        unsafe { surface.format(&self.adapter.physical_device) }
    }

    /// Check if the queue family supports presentation to the specified surface.
    pub fn surface_support(&self, family: FamilyId, surface: &Surface<B>) -> bool {
        assert_eq!(
            surface.instance_id(),
            self.instance.id(),
            "Resource is not owned by specified instance"
        );
        surface
            .raw()
            .supports_queue_family(&self.adapter.queue_families[family.index])
    }

    /// Destroy the surface.
    ///
    /// # Panics
    ///
    /// Panics if `surface` was not created by this `Factory`.
    pub fn destroy_surface(&mut self, surface: Surface<B>) {
        assert_eq!(
            surface.instance_id(),
            self.instance.id(),
            "Resource is not owned by specified instance"
        );
        drop(surface);
    }

    /// Create a target out of a rendering surface.
    ///
    /// The compatibility of the surface with the queue family which will present to
    /// this target must have *already* been checked using `Factory::surface_support`.
    ///
    /// # Panics
    ///
    /// Panics if `surface` was not created by this `Factory`.
    pub fn create_target(
        &self,
        surface: Surface<B>,
        extent: Extent2D,
        image_count: u32,
        present_mode: rendy_core::hal::window::PresentMode,
        usage: image::Usage,
    ) -> Result<Target<B>, SwapchainError> {
        profile_scope!("create_target");

        unsafe {
            surface.into_target(
                &self.adapter.physical_device,
                &self.device,
                extent,
                image_count,
                present_mode,
                usage,
            )
        }
    }

    /// Destroy the target, returning the underlying surface back to the caller.
    ///
    /// # Safety
    ///
    /// Target images must not be used by pending commands or referenced anywhere.
    pub unsafe fn destroy_target(&self, target: Target<B>) -> Surface<B> {
        target.dispose(&self.device)
    }

    /// Get the raw device.
    pub fn device(&self) -> &Device<B> {
        &self.device
    }

    /// Get the raw physical device.
    pub fn physical(&self) -> &B::PhysicalDevice {
        &self.adapter.physical_device
    }

    /// Create a new semaphore.
    pub fn create_semaphore(&self) -> Result<B::Semaphore, OutOfMemory> {
        profile_scope!("create_semaphore");

        self.device.create_semaphore()
    }

    /// Destroy a semaphore.
    ///
    /// # Safety
    ///
    /// The semaphore must have been created by this `Factory`.
    pub unsafe fn destroy_semaphore(&self, semaphore: B::Semaphore) {
        self.device.destroy_semaphore(semaphore);
    }

    /// Create a new fence.
    pub fn create_fence(&self, signaled: bool) -> Result<Fence<B>, OutOfMemory> {
        Fence::new(&self.device, signaled)
    }

    /// Reset the fence.
    pub fn reset_fence(&self, fence: &mut Fence<B>) -> Result<(), OutOfMemory> {
        fence.reset(&self.device)
    }

    /// Reset multiple fences at once.
    ///
    /// # Safety
    ///
    /// Fences must be created by this `Factory` and must be in the signaled state.
    pub fn reset_fences<'a>(
        &self,
        fences: impl IntoIterator<Item = &'a mut (impl BorrowMut<Fence<B>> + 'a)>,
    ) -> Result<(), OutOfMemory> {
        let fences = fences
            .into_iter()
            .map(|f| {
                let f = f.borrow_mut();
                f.assert_device_owner(&self.device);
                assert!(f.is_signaled());
                f
            })
            .collect::<SmallVec<[_; 32]>>();
        unsafe {
            self.device.reset_fences(fences.iter().map(|f| f.raw()))?;
            fences.into_iter().for_each(|f| f.mark_reset());
        }
        Ok(())
    }

    /// Wait for the fence to become signaled.
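    ///
    /// A minimal usage sketch (the `factory` value is an assumption; the fence is presumed to be
    /// signaled by queue work submitted elsewhere):
    ///
    /// ```ignore
    /// let mut fence = factory.create_fence(false)?;
    /// // ... submit work that signals `fence` ...
    /// if factory.wait_for_fence(&mut fence, 1_000_000_000)? {
    ///     // Signaled within one second (timeout is in nanoseconds); reset for reuse.
    ///     factory.reset_fence(&mut fence)?;
    /// }
    /// factory.destroy_fence(fence);
    /// ```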
    pub fn wait_for_fence(
        &self,
        fence: &mut Fence<B>,
        timeout_ns: u64,
    ) -> Result<bool, OomOrDeviceLost> {
        profile_scope!("wait_for_fence");

        fence.assert_device_owner(&self.device);

        if let Some(fence_epoch) = fence.wait_signaled(&self.device, timeout_ns)? {
            // Now we can update epochs counter.
            let family_index = self.families_indices[fence_epoch.queue.family.index];
            let mut lock = self.epochs[family_index].write();
            let epoch = &mut lock[fence_epoch.queue.index];
            *epoch = max(*epoch, fence_epoch.epoch);

            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Wait for the fences to become signaled.
    pub fn wait_for_fences<'a>(
        &self,
        fences: impl IntoIterator<Item = &'a mut (impl BorrowMut<Fence<B>> + 'a)>,
        wait_for: WaitFor,
        timeout_ns: u64,
    ) -> Result<bool, OomOrDeviceLost> {
        profile_scope!("wait_for_fences");

        let fences = fences
            .into_iter()
            .map(|f| f.borrow_mut())
            .inspect(|f| f.assert_device_owner(&self.device))
            .collect::<SmallVec<[_; 32]>>();

        if fences.is_empty() {
            return Ok(true);
        }

        let timeout = !unsafe {
            self.device.wait_for_fences(
                fences.iter().map(|f| f.raw()),
                wait_for.clone(),
                timeout_ns,
            )
        }?;

        if timeout {
            return Ok(false);
        }

        let mut epoch_locks = SmallVec::<[_; 32]>::new();
        for fence in &fences {
            let family_id = fence.epoch().queue.family;
            while family_id.index >= epoch_locks.len() {
                epoch_locks.push(None);
            }
        }

        match wait_for {
            WaitFor::Any => {
                for fence in fences {
                    if unsafe { self.device.get_fence_status(fence.raw()) }? {
                        let epoch = unsafe { fence.mark_signaled() };
                        let family_id = epoch.queue.family;
                        let family_index = *self
                            .families_indices
                            .get(family_id.index)
                            .expect("Valid family id expected");
                        let lock = epoch_locks[family_id.index]
                            .get_or_insert_with(|| self.epochs[family_index].write());
                        let queue_epoch = &mut lock[epoch.queue.index];
                        *queue_epoch = max(*queue_epoch, epoch.epoch);
                    }
                }
            }
            WaitFor::All => {
                for fence in fences {
                    // all fences signaled
                    let epoch = unsafe { fence.mark_signaled() };
                    let family_id = epoch.queue.family;
                    let family_index = *self
                        .families_indices
                        .get(family_id.index)
                        .expect("Valid family id expected");
                    let lock = epoch_locks[family_id.index]
                        .get_or_insert_with(|| self.epochs[family_index].write());
                    let queue_epoch = &mut lock[epoch.queue.index];
                    *queue_epoch = max(*queue_epoch, epoch.epoch);
                }
            }
        }
        Ok(true)
    }

    /// Destroy a fence.
    ///
    /// # Safety
    ///
    /// The fence must have been created by this `Factory`.
    pub fn destroy_fence(&self, fence: Fence<B>) {
        unsafe { self.device.destroy_fence(fence.into_inner()) }
    }

    /// Create a new command pool for the specified family.
    pub fn create_command_pool<R>(
        &self,
        family: &Family<B>,
    ) -> Result<CommandPool<B, QueueType, R>, OutOfMemory>
    where
        R: Reset,
    {
        profile_scope!("create_command_pool");

        family.create_pool(&self.device)
    }

    /// Destroy a command pool.
    ///
    /// # Safety
    ///
    /// All command buffers allocated from the pool must be freed.
    pub unsafe fn destroy_command_pool<C, R>(&self, pool: CommandPool<B, C, R>)
    where
        R: Reset,
    {
        pool.dispose(&self.device);
    }

    fn next_epochs(&mut self, families: &Families<B>) -> Epochs {
        Epochs {
            values: families
                .as_slice()
                .iter()
                .map(|f| f.as_slice().iter().map(|q| q.next_epoch()).collect())
                .collect(),
        }
    }

    fn complete_epochs(&mut self) -> Epochs {
        Epochs {
            values: self
                .epochs
                .iter_mut()
                .map(|l| l.get_mut().iter().cloned().collect())
                .collect(),
        }
    }

    /// Clean up unused resources.
    pub fn cleanup(&mut self, families: &Families<B>) {
        profile_scope!("cleanup");

        let next = self.next_epochs(families);
        let complete = self.complete_epochs();
        unsafe {
            self.uploader.cleanup(&self.device);
            self.blitter.cleanup(&self.device);
            self.resources.cleanup(
                &self.device,
                self.heaps.get_mut(),
                self.descriptor_allocator.get_mut(),
                next,
                complete,
            );

            self.descriptor_allocator.get_mut().cleanup(&self.device);
        }
    }

    /// Flush uploads.
    pub fn flush_uploads(&mut self, families: &mut Families<B>) {
        unsafe { self.uploader.flush(families) }
    }

    /// Flush blits.
    pub fn flush_blits(&mut self, families: &mut Families<B>) {
        unsafe { self.blitter.flush(families) }
    }

    /// Flush uploads and clean up unused resources.
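    ///
    /// A minimal per-frame usage sketch (the `factory` and `families` values are assumptions):
    ///
    /// ```ignore
    /// // Typically called once per frame after recording uploads and blits.
    /// factory.maintain(&mut families);
    /// ```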
    pub fn maintain(&mut self, families: &mut Families<B>) {
        self.flush_uploads(families);
        self.flush_blits(families);
        self.cleanup(families);
    }

    /// Create a descriptor set layout with the specified bindings.
    pub fn create_relevant_descriptor_set_layout(
        &self,
        bindings: Vec<DescriptorSetLayoutBinding>,
    ) -> Result<DescriptorSetLayout<B>, OutOfMemory> {
        unsafe { DescriptorSetLayout::create(&self.device, DescriptorSetInfo { bindings }) }
    }

    /// Create a descriptor set layout with the specified bindings.
    pub fn create_descriptor_set_layout(
        &self,
        bindings: Vec<DescriptorSetLayoutBinding>,
    ) -> Result<Escape<DescriptorSetLayout<B>>, OutOfMemory> {
        let layout = self.create_relevant_descriptor_set_layout(bindings)?;
        Ok(self.resources.layouts.escape(layout))
    }

    /// Create a descriptor set with the specified layout.
    pub fn create_relevant_descriptor_set(
        &self,
        layout: Handle<DescriptorSetLayout<B>>,
    ) -> Result<DescriptorSet<B>, OutOfMemory> {
        // TODO: Check `layout` belongs to this factory.
        unsafe {
            DescriptorSet::create(&self.device, &mut self.descriptor_allocator.lock(), layout)
        }
    }

    /// Create a descriptor set with the specified layout.
    pub fn create_descriptor_set(
        &self,
        layout: Handle<DescriptorSetLayout<B>>,
    ) -> Result<Escape<DescriptorSet<B>>, OutOfMemory> {
        let set = self.create_relevant_descriptor_set(layout)?;
        Ok(self.resources.sets.escape(set))
    }

    /// Create multiple descriptor sets with the specified layout.
    ///
    /// # Safety
    ///
    /// `layout` must have been created by this `Factory`.
    ///
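    /// A minimal usage sketch (the `factory` and `layout` values are assumptions; any collection
    /// implementing `FromIterator` can receive the result):
    ///
    /// ```ignore
    /// let sets: Vec<Escape<DescriptorSet<B>>> =
    ///     factory.create_descriptor_sets(layout.clone(), 3)?;
    /// ```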
    pub fn create_descriptor_sets<T>(
        &self,
        layout: Handle<DescriptorSetLayout<B>>,
        count: u32,
    ) -> Result<T, OutOfMemory>
    where
        T: std::iter::FromIterator<Escape<DescriptorSet<B>>>,
    {
        profile_scope!("create_descriptor_sets");

        let mut result = SmallVec::<[_; 32]>::new();
        unsafe {
            DescriptorSet::create_many(
                &self.device,
                &mut self.descriptor_allocator.lock(),
                layout,
                count,
                &mut result,
            )
        }?;

        Ok(result
            .into_iter()
            .map(|set| self.resources.sets.escape(set))
            .collect())
    }

    /// Query memory utilization.
    pub fn memory_utilization(&self) -> TotalMemoryUtilization {
        self.heaps.lock().utilization()
    }

    /// Get Factory's instance id.
    pub fn instance_id(&self) -> InstanceId {
        self.device.id().instance
    }
}

impl<B> std::ops::Deref for Factory<B>
where
    B: Backend,
{
    type Target = Device<B>;

    fn deref(&self) -> &Device<B> {
        &self.device
    }
}

/// Initialize the `Factory` and queue `Families` associated with a `Device`,
/// using an existing `Instance` that the `Factory` takes ownership of.
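///
/// A minimal usage sketch (how `instance` is obtained is left out; `Config::default()` here is
/// an assumption that the default device/heaps/queues configuration is acceptable):
///
/// ```ignore
/// let config: Config = Default::default();
/// // `instance` is a `rendy_core::Instance<B>` created elsewhere for the chosen backend.
/// let (factory, families) = init_with_instance(instance, &config)?;
/// ```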
pub fn init_with_instance<B>(
    instance: Instance<B>,
    config: &Config<impl DevicesConfigure, impl HeapsConfigure, impl QueuesConfigure>,
) -> Result<(Factory<B>, Families<B>), CreationError>
where
    B: Backend,
{
    let (mut factory, families) = init_with_instance_ref(&instance, config)?;
    factory.instance = InstanceOrId::Instance(instance);
    Ok((factory, families))
}

/// Initialize the `Factory` and queue `Families` associated with a `Device`,
/// using an existing `Instance` by reference. The returned `Factory` stores only the
/// instance id; the caller keeps ownership of the `Instance`.
pub fn init_with_instance_ref<B>(
    instance: &Instance<B>,
    config: &Config<impl DevicesConfigure, impl HeapsConfigure, impl QueuesConfigure>,
) -> Result<(Factory<B>, Families<B>), CreationError>
where
    B: Backend,
{
    rendy_with_slow_safety_checks!(
        log::warn!("Slow safety checks are enabled! Disable them in production by enabling the 'no-slow-safety-checks' feature!")
    );
    let mut adapters = instance.enumerate_adapters();

    if adapters.is_empty() {
        log::warn!("No physical devices found");
        return Err(rendy_core::hal::device::CreationError::InitializationFailed);
    }

    log::debug!(
        "Physical devices:\n{:#?}",
        adapters
            .iter()
            .map(|adapter| &adapter.info)
            .collect::<SmallVec<[_; 32]>>()
    );

    let picked = config.devices.pick(&adapters);
    if picked >= adapters.len() {
        panic!("Physical device pick config returned index out of bounds");
    }
    let adapter = adapters.swap_remove(picked);

    #[derive(Debug)]
    struct PhysicalDeviceInfo<'a> {
        name: &'a str,
        features: Features,
        limits: Limits,
    }

    log::debug!(
        "Physical device picked: {:#?}",
        PhysicalDeviceInfo {
            name: &adapter.info.name,
            features: adapter.physical_device.features(),
            limits: adapter.physical_device.limits(),
        }
    );

    let device_id = DeviceId::new(instance.id());

    let (device, families) = {
        let families = config
            .queues
            .configure(device_id, &adapter.queue_families)
            .into_iter()
            .collect::<SmallVec<[_; 16]>>();
        let (create_queues, get_queues): (SmallVec<[_; 32]>, SmallVec<[_; 32]>) = families
            .iter()
            .map(|(index, priorities)| {
                (
                    (&adapter.queue_families[index.index], priorities.as_ref()),
                    (*index, priorities.as_ref().len()),
                )
            })
            .unzip();

        log::debug!("Queues: {:#?}", get_queues);

        let Gpu {
            device,
            mut queue_groups,
        } = unsafe {
            adapter
                .physical_device
                .open(&create_queues, adapter.physical_device.features())
        }?;

        let families = unsafe {
            families_from_device(
                device_id,
                &mut queue_groups,
                get_queues,
                &adapter.queue_families,
            )
        };
        (device, families)
    };

    let device = Device::from_raw(device, device_id);

    let (types, heaps) = config
        .heaps
        .configure(&adapter.physical_device.memory_properties());
    let heaps = heaps.into_iter().collect::<SmallVec<[_; 16]>>();
    let types = types.into_iter().collect::<SmallVec<[_; 32]>>();

    log::debug!("Heaps: {:#?}\nTypes: {:#?}", heaps, types);

    let heaps = unsafe { Heaps::new(types, heaps) };

    let epochs = families
        .as_slice()
        .iter()
        .map(|f| parking_lot::RwLock::new(vec![0; f.as_slice().len()]))
        .collect();

    let factory = Factory {
        descriptor_allocator: ManuallyDrop::new(
            parking_lot::Mutex::new(DescriptorAllocator::new()),
        ),
        heaps: ManuallyDrop::new(parking_lot::Mutex::new(heaps)),
        resources: ManuallyDrop::new(ResourceHub::default()),
        uploader: unsafe { Uploader::new(&device, &families) }
            .map_err(rendy_core::hal::device::CreationError::OutOfMemory)?,
        blitter: unsafe { Blitter::new(&device, &families) }
            .map_err(rendy_core::hal::device::CreationError::OutOfMemory)?,
        families_indices: families.indices().into(),
        epochs,
        device,
        adapter,
        instance: InstanceOrId::Id(instance.id()),
    };

    Ok((factory, families))
}