1use {
2 crate::{
3 blitter::Blitter,
4 command::{
5 families_from_device, CommandPool, Families, Family, FamilyId, Fence, QueueType, Reset,
6 },
7 config::{Config, DevicesConfigure, HeapsConfigure, QueuesConfigure},
8 core::{rendy_with_slow_safety_checks, Device, DeviceId, Instance, InstanceId},
9 descriptor::DescriptorAllocator,
10 memory::{self, Heaps, MemoryUsage, TotalMemoryUtilization, Write},
11 resource::*,
12 upload::{BufferState, ImageState, ImageStateOrLayout, Uploader},
13 wsi::{Surface, SwapchainError, Target},
14 },
15 rendy_core::{
16 hal::{
17 adapter::{Adapter, Gpu, PhysicalDevice},
18 buffer,
19 device::{
20 AllocationError, CreationError, Device as _, MapError, OomOrDeviceLost,
21 OutOfMemory, WaitFor,
22 },
23 format, image,
24 pso::DescriptorSetLayoutBinding,
25 window::{Extent2D, InitError, Surface as GfxSurface},
26 Backend, Features, Instance as _, Limits,
27 },
28 HasRawWindowHandle,
29 },
30 smallvec::SmallVec,
31 std::{borrow::BorrowMut, cmp::max, mem::ManuallyDrop},
32 thread_profiler::profile_scope,
33};
34
#[derive(Debug)]
/// Central registry of device resources tracked by the factory.
///
/// Each `ResourceTracker` collects escaped resources so they can be
/// destroyed during `Factory::cleanup` / `Factory::drop` instead of at an
/// arbitrary point while the GPU may still use them.
struct ResourceHub<B: Backend> {
    buffers: ResourceTracker<Buffer<B>>,
    images: ResourceTracker<Image<B>>,
    views: ResourceTracker<ImageView<B>>,
    layouts: ResourceTracker<DescriptorSetLayout<B>>,
    sets: ResourceTracker<DescriptorSet<B>>,
    samplers: ResourceTracker<Sampler<B>>,
    // Cache of samplers deduplicated by `SamplerDesc`; see `Factory::get_sampler`.
    samplers_cache: parking_lot::RwLock<SamplerCache<B>>,
}
45
46impl<B> Default for ResourceHub<B>
47where
48 B: Backend,
49{
50 fn default() -> Self {
51 ResourceHub {
52 buffers: ResourceTracker::default(),
53 images: ResourceTracker::default(),
54 views: ResourceTracker::default(),
55 layouts: ResourceTracker::default(),
56 sets: ResourceTracker::default(),
57 samplers: ResourceTracker::default(),
58 samplers_cache: parking_lot::RwLock::new(SamplerCache::default()),
59 }
60 }
61}
62
impl<B> ResourceHub<B>
where
    B: Backend,
{
    /// Destroys tracked resources whose retirement epoch is proven complete.
    ///
    /// `next`/`complete` are the upcoming and already-finished queue epochs;
    /// each tracker uses them to decide which escaped resources are safe to
    /// destroy. Sets are destroyed before views/layouts, and views before
    /// buffers/images, so dependents go first.
    ///
    /// # Safety
    ///
    /// Caller must pass the device/heaps/allocator that created these
    /// resources and epochs consistent with actual queue progress — TODO
    /// confirm exact contract against `ResourceTracker::cleanup`.
    unsafe fn cleanup(
        &mut self,
        device: &Device<B>,
        heaps: &mut Heaps<B>,
        allocator: &mut DescriptorAllocator<B>,
        next: Epochs,
        complete: Epochs,
    ) {
        self.sets
            .cleanup(|s| s.dispose(allocator), &next, &complete);
        self.views.cleanup(|v| v.dispose(device), &next, &complete);
        self.layouts
            .cleanup(|l| l.dispose(device), &next, &complete);
        self.buffers
            .cleanup(|b| b.dispose(device, heaps), &next, &complete);
        self.images
            .cleanup(|i| i.dispose(device, heaps), &next, &complete);
        self.samplers
            .cleanup(|i| i.dispose(device), &next, &complete);
    }

    /// Destroys all remaining tracked resources unconditionally.
    ///
    /// # Safety
    ///
    /// The device must be idle: nothing may still use any tracked resource.
    unsafe fn dispose(
        mut self,
        device: &Device<B>,
        heaps: &mut Heaps<B>,
        allocator: &mut DescriptorAllocator<B>,
    ) {
        // Drop the cache first so its handles release before the trackers
        // dispose the samplers themselves.
        drop(self.samplers_cache);
        self.sets.dispose(|s| s.dispose(allocator));
        self.views.dispose(|v| v.dispose(device));
        self.layouts.dispose(|l| l.dispose(device));
        self.buffers.dispose(|b| b.dispose(device, heaps));
        self.images.dispose(|i| i.dispose(device, heaps));
        self.samplers.dispose(|i| i.dispose(device));
    }
}
103
#[derive(Clone, Debug, PartialEq)]
/// Failure modes of `Factory::upload_buffer` / `Factory::upload_image`.
pub enum UploadError {
    /// Creating the internal staging buffer failed.
    Create(BufferCreationError),
    /// Mapping the staging buffer for writing failed.
    Map(MapError),
    /// Handing the staging buffer to the uploader failed.
    Upload(OutOfMemory),
}
114
115impl std::fmt::Display for UploadError {
116 fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
117 match self {
118 UploadError::Create(err) => write!(fmt, "Upload failed: {:?}", err),
119 UploadError::Map(err) => write!(fmt, "Upload failed: {:?}", err),
120 UploadError::Upload(err) => write!(fmt, "Upload failed: {:?}", err),
121 }
122 }
123}
124
125impl std::error::Error for UploadError {
126 fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
127 match self {
128 UploadError::Create(err) => Some(err),
129 UploadError::Map(err) => Some(err),
130 UploadError::Upload(err) => Some(err),
131 }
132 }
133}
134
#[derive(Debug)]
/// Either an owned backend instance or just its id.
///
/// `init_with_instance_ref` builds the factory with only an id; the owning
/// `init_with_instance` swaps in the full instance afterwards.
enum InstanceOrId<B: Backend> {
    Instance(Instance<B>),
    Id(InstanceId),
}
140
141impl<B> InstanceOrId<B>
142where
143 B: Backend,
144{
145 fn id(&self) -> InstanceId {
146 match self {
147 InstanceOrId::Instance(instance) => instance.id(),
148 InstanceOrId::Id(id) => *id,
149 }
150 }
151
152 fn as_instance(&self) -> Option<&Instance<B>> {
153 match self {
154 InstanceOrId::Instance(instance) => Some(instance),
155 InstanceOrId::Id(_) => None,
156 }
157 }
158}
159
#[derive(Debug)]
/// Higher-level owner of a device: allocates memory and descriptors,
/// creates/tracks resources, and drives uploads and blits.
pub struct Factory<B: Backend> {
    // `ManuallyDrop` fields are taken out by `ptr::read` in `Drop` so they
    // can be disposed with the device in a controlled order.
    descriptor_allocator: ManuallyDrop<parking_lot::Mutex<DescriptorAllocator<B>>>,
    heaps: ManuallyDrop<parking_lot::Mutex<Heaps<B>>>,
    resources: ManuallyDrop<ResourceHub<B>>,
    // Per-family, per-queue epochs of completed work (see `wait_for_fence*`).
    epochs: Vec<parking_lot::RwLock<Vec<u64>>>,
    uploader: Uploader<B>,
    blitter: Blitter<B>,
    // Maps `FamilyId.index` to an index into `epochs`.
    families_indices: Vec<usize>,
    device: Device<B>,
    adapter: Adapter<B>,
    instance: InstanceOrId<B>,
}
175
176#[allow(unused)]
177fn factory_is_send_sync<B: Backend>() {
178 fn is_send_sync<T: Send + Sync>() {}
179 is_send_sync::<Factory<B>>();
180}
181
impl<B> Drop for Factory<B>
where
    B: Backend,
{
    /// Waits for the device to idle, then tears everything down in
    /// dependency order: uploader/blitter, tracked resources, heaps,
    /// descriptor allocator.
    fn drop(&mut self) {
        log::debug!("Dropping factory");
        // Ensure the GPU is done before destroying anything it might use.
        self.wait_idle().unwrap();

        unsafe {
            self.uploader.dispose(&self.device);
            log::trace!("Uploader disposed");
            self.blitter.dispose(&self.device);
            log::trace!("Blitter disposed");
            // Move the hub out of the `ManuallyDrop` (it is never touched
            // again) and destroy all remaining resources.
            std::ptr::read(&mut *self.resources).dispose(
                &self.device,
                self.heaps.get_mut(),
                self.descriptor_allocator.get_mut(),
            );

            log::trace!("Resources disposed");
        }

        // Heaps must outlive the resources above, so they go second.
        unsafe {
            std::ptr::read(&mut *self.heaps)
                .into_inner()
                .dispose(&self.device);
            log::trace!("Heaps disposed");
        }

        unsafe {
            std::ptr::read(&mut *self.descriptor_allocator)
                .into_inner()
                .dispose(&self.device);
            log::trace!("Descriptor allocator disposed");
        }

        log::trace!("Factory dropped");
    }
}
222
223impl<B> Factory<B>
224where
225 B: Backend,
226{
227 pub fn wait_idle(&self) -> Result<(), OutOfMemory> {
231 profile_scope!("wait_idle");
232
233 log::debug!("Wait device idle");
234 self.device.wait_idle()?;
235 log::trace!("Device idle");
236 Ok(())
237 }
238
239 pub fn create_relevant_buffer(
246 &self,
247 info: BufferInfo,
248 memory_usage: impl MemoryUsage,
249 ) -> Result<Buffer<B>, BufferCreationError> {
250 profile_scope!("create_relevant_buffer");
251
252 unsafe { Buffer::create(&self.device, &mut self.heaps.lock(), info, memory_usage) }
253 }
254
    /// Destroys a buffer from `create_relevant_buffer`, returning its memory
    /// to the heaps.
    ///
    /// # Safety
    ///
    /// The buffer must no longer be in use by the device — this is why the
    /// function is `unsafe`.
    pub unsafe fn destroy_relevant_buffer(&self, buffer: Buffer<B>) {
        buffer.dispose(&self.device, &mut self.heaps.lock());
    }
268
269 pub fn create_buffer(
275 &self,
276 info: BufferInfo,
277 memory_usage: impl MemoryUsage,
278 ) -> Result<Escape<Buffer<B>>, BufferCreationError> {
279 let buffer = self.create_relevant_buffer(info, memory_usage)?;
280 Ok(self.resources.buffers.escape(buffer))
281 }
282
283 pub fn create_relevant_image(
290 &self,
291 info: ImageInfo,
292 memory_usage: impl MemoryUsage,
293 ) -> Result<Image<B>, ImageCreationError> {
294 profile_scope!("create_relevant_image");
295
296 unsafe { Image::create(&self.device, &mut self.heaps.lock(), info, memory_usage) }
297 }
298
    /// Destroys an image from `create_relevant_image`, returning its memory
    /// to the heaps.
    ///
    /// # Safety
    ///
    /// The image must no longer be in use by the device.
    pub unsafe fn destroy_relevant_image(&self, image: Image<B>) {
        image.dispose(&self.device, &mut self.heaps.lock());
    }
312
313 pub fn create_image(
319 &self,
320 info: ImageInfo,
321 memory_usage: impl MemoryUsage,
322 ) -> Result<Escape<Image<B>>, ImageCreationError> {
323 let image = self.create_relevant_image(info, memory_usage)?;
324 Ok(self.resources.images.escape(image))
325 }
326
327 pub fn image_format_properties(&self, info: ImageInfo) -> Option<FormatProperties> {
329 self.physical().image_format_properties(
330 info.format,
331 match info.kind {
332 Kind::D1(_, _) => 1,
333 Kind::D2(_, _, _, _) => 2,
334 Kind::D3(_, _, _) => 3,
335 },
336 info.tiling,
337 info.usage,
338 info.view_caps,
339 )
340 }
341
    /// Creates an image view that is NOT tracked by the factory; the caller
    /// must destroy it via `destroy_relevant_image_view`.
    pub fn create_relevant_image_view(
        &self,
        image: Handle<Image<B>>,
        info: ImageViewInfo,
    ) -> Result<ImageView<B>, ImageViewCreationError> {
        ImageView::create(&self.device, info, image)
    }
355
    /// Destroys an image view from `create_relevant_image_view`.
    ///
    /// # Safety
    ///
    /// The view must no longer be in use by the device.
    pub unsafe fn destroy_relevant_image_view(&self, view: ImageView<B>) {
        view.dispose(&self.device);
    }
369
370 pub fn create_image_view(
376 &self,
377 image: Handle<Image<B>>,
378 info: ImageViewInfo,
379 ) -> Result<Escape<ImageView<B>>, ImageViewCreationError> {
380 let view = self.create_relevant_image_view(image, info)?;
381 Ok(self.resources.views.escape(view))
382 }
383
    /// Creates a sampler that is NOT tracked by the factory; the caller must
    /// destroy it via `destroy_relevant_sampler`.
    pub fn create_relevant_sampler(
        &self,
        info: SamplerDesc,
    ) -> Result<Sampler<B>, AllocationError> {
        Sampler::create(&self.device, info)
    }
396
    /// Destroys a sampler from `create_relevant_sampler`.
    ///
    /// # Safety
    ///
    /// The sampler must no longer be in use by the device.
    pub unsafe fn destroy_relevant_sampler(&self, sampler: Sampler<B>) {
        sampler.dispose(&self.device);
    }
413
414 pub fn create_sampler(&self, info: SamplerDesc) -> Result<Escape<Sampler<B>>, AllocationError> {
420 let sampler = self.create_relevant_sampler(info)?;
421 Ok(self.resources.samplers.escape(sampler))
422 }
423
    /// Returns a cached sampler for `info`, creating and caching it on a
    /// miss.
    pub fn get_sampler(&self, info: SamplerDesc) -> Result<Handle<Sampler<B>>, AllocationError> {
        let samplers = &self.resources.samplers;
        let device = &self.device;

        // Take an upgradable read lock: concurrent hits share the read lock,
        // and only a miss upgrades to a write lock to insert the new entry.
        SamplerCache::get_with_upgradable_lock(
            self.resources.samplers_cache.upgradable_read(),
            parking_lot::RwLockUpgradableReadGuard::upgrade,
            info.clone(),
            || Ok(samplers.handle(Sampler::create(device, info)?)),
        )
    }
441
    /// Writes `content` into a host-visible buffer at byte `offset` by
    /// mapping it.
    ///
    /// # Safety
    ///
    /// The mapped range must not be in use by the device during the write —
    /// TODO confirm exact contract against `Buffer::map`.
    pub unsafe fn upload_visible_buffer<T>(
        &self,
        buffer: &mut Buffer<B>,
        offset: u64,
        content: &[T],
    ) -> Result<(), MapError>
    where
        T: 'static + Copy,
    {
        // Reinterpret `content` as raw bytes; `T: Copy + 'static` keeps this
        // free of drop/borrow concerns. From here on `content.len()` is a
        // byte count.
        let content = std::slice::from_raw_parts(
            content.as_ptr() as *const u8,
            content.len() * std::mem::size_of::<T>(),
        );

        let mut mapped = buffer.map(&self.device, offset..offset + content.len() as u64)?;
        mapped
            .write(&self.device, 0..content.len() as u64)?
            .write(content);
        Ok(())
    }
480
    /// Uploads `content` to a device-local buffer through an internal
    /// staging buffer, recording barriers from `last` to `next`.
    ///
    /// # Errors
    ///
    /// Returns [`UploadError`] when the staging buffer cannot be created or
    /// mapped, or when the uploader fails to record the copy.
    ///
    /// # Safety
    ///
    /// The destination range must not be accessed by the device while the
    /// upload is pending — TODO confirm against `Uploader::upload_buffer`.
    pub unsafe fn upload_buffer<T>(
        &self,
        buffer: &Buffer<B>,
        offset: u64,
        content: &[T],
        last: Option<BufferState>,
        next: BufferState,
    ) -> Result<(), UploadError>
    where
        T: 'static + Copy,
    {
        // The destination must be a valid transfer target.
        assert!(buffer.info().usage.contains(buffer::Usage::TRANSFER_DST));

        let content_size = content.len() as u64 * std::mem::size_of::<T>() as u64;
        // Staging buffer is tracked so it is freed only once the copy is done.
        let mut staging = self
            .create_buffer(
                BufferInfo {
                    size: content_size,
                    usage: buffer::Usage::TRANSFER_SRC,
                },
                memory::Upload,
            )
            .map_err(UploadError::Create)?;

        self.upload_visible_buffer(&mut staging, 0, content)
            .map_err(UploadError::Map)?;

        self.uploader
            .upload_buffer(&self.device, buffer, offset, staging, last, next)
            .map_err(UploadError::Upload)
    }
533
    /// Copies from a caller-provided staging buffer into `buffer` at byte
    /// `offset`, recording barriers from `last` to `next`.
    ///
    /// # Safety
    ///
    /// The destination range must not be accessed by the device while the
    /// upload is pending; `staging` must contain the bytes to copy.
    pub unsafe fn upload_from_staging_buffer(
        &self,
        buffer: &Buffer<B>,
        offset: u64,
        staging: Escape<Buffer<B>>,
        last: Option<BufferState>,
        next: BufferState,
    ) -> Result<(), OutOfMemory> {
        // Destination must accept transfers; staging must be a transfer source.
        assert!(buffer.info().usage.contains(buffer::Usage::TRANSFER_DST));
        assert!(staging.info().usage.contains(buffer::Usage::TRANSFER_SRC));
        self.uploader
            .upload_buffer(&self.device, buffer, offset, staging, last, next)
    }
561
562 pub unsafe fn transition_image(
577 &self,
578 image: Handle<Image<B>>,
579 image_range: SubresourceRange,
580 last: impl Into<ImageStateOrLayout>,
581 next: ImageState,
582 ) {
583 self.uploader
584 .transition_image(image, image_range, last.into(), next);
585 }
586
587 pub unsafe fn upload_image<T>(
609 &self,
610 image: Handle<Image<B>>,
611 data_width: u32,
612 data_height: u32,
613 image_layers: SubresourceLayers,
614 image_offset: image::Offset,
615 image_extent: Extent,
616 content: &[T],
617 last: impl Into<ImageStateOrLayout>,
618 next: ImageState,
619 ) -> Result<(), UploadError>
620 where
621 T: 'static + Copy,
622 {
623 assert!(image.info().usage.contains(image::Usage::TRANSFER_DST));
624 assert_eq!(image.format().surface_desc().aspects, image_layers.aspects);
625 assert!(image_layers.layers.start <= image_layers.layers.end);
626 assert!(image_layers.layers.end <= image.kind().num_layers());
627 assert!(image_layers.level <= image.info().levels);
628
629 let content_size = content.len() as u64 * std::mem::size_of::<T>() as u64;
630 let format_desc = image.format().surface_desc();
631 let texels_count = (image_extent.width / format_desc.dim.0 as u32) as u64
632 * (image_extent.height / format_desc.dim.1 as u32) as u64
633 * image_extent.depth as u64
634 * (image_layers.layers.end - image_layers.layers.start) as u64;
635 let total_bytes = (format_desc.bits as u64 / 8) * texels_count;
636 assert_eq!(
637 total_bytes, content_size,
638 "Size of must match size of the image region"
639 );
640
641 let mut staging = self
642 .create_buffer(
643 BufferInfo {
644 size: content_size,
645 usage: buffer::Usage::TRANSFER_SRC,
646 },
647 memory::Upload,
648 )
649 .map_err(UploadError::Create)?;
650
651 self.upload_visible_buffer(&mut staging, 0, content)
652 .map_err(UploadError::Map)?;
653
654 self.uploader
655 .upload_image(
656 &self.device,
657 image,
658 data_width,
659 data_height,
660 image_layers,
661 image_offset,
662 image_extent,
663 staging,
664 last.into(),
665 next,
666 )
667 .map_err(UploadError::Upload)
668 }
669
    /// Returns the blitter used for image blit operations.
    pub fn blitter(&self) -> &Blitter<B> {
        &self.blitter
    }
674
675 pub fn create_surface(
677 &mut self,
678 handle: &impl HasRawWindowHandle,
679 ) -> Result<Surface<B>, InitError> {
680 profile_scope!("create_surface");
681 Surface::new(
682 self.instance
683 .as_instance()
684 .expect("Cannot create surface without instance"),
685 handle,
686 )
687 }
688
689 pub unsafe fn create_surface_with(
695 &mut self,
696 f: impl FnOnce(&B::Instance) -> B::Surface,
697 ) -> Surface<B> {
698 profile_scope!("create_surface");
699 Surface::new_with(
700 self.instance
701 .as_instance()
702 .expect("Cannot create surface without instance"),
703 f,
704 )
705 }
706
707 pub fn get_surface_formats(
713 &self,
714 surface: &Surface<B>,
715 ) -> Option<Vec<rendy_core::hal::format::Format>> {
716 profile_scope!("get_surface_compatibility");
717
718 assert_eq!(
719 surface.instance_id(),
720 self.instance.id(),
721 "Resource is not owned by specified instance"
722 );
723 unsafe { surface.supported_formats(&self.adapter.physical_device) }
724 }
725
726 pub fn get_surface_capabilities(
732 &self,
733 surface: &Surface<B>,
734 ) -> rendy_core::hal::window::SurfaceCapabilities {
735 profile_scope!("get_surface_compatibility");
736
737 assert_eq!(
738 surface.instance_id(),
739 self.instance.id(),
740 "Resource is not owned by specified instance"
741 );
742 unsafe { surface.capabilities(&self.adapter.physical_device) }
743 }
744
745 pub fn get_surface_format(&self, surface: &Surface<B>) -> format::Format {
751 profile_scope!("get_surface_format");
752
753 assert_eq!(
754 surface.instance_id(),
755 self.instance.id(),
756 "Resource is not owned by specified instance"
757 );
758 unsafe { surface.format(&self.adapter.physical_device) }
759 }
760
761 pub fn surface_support(&self, family: FamilyId, surface: &Surface<B>) -> bool {
763 assert_eq!(
764 surface.instance_id(),
765 self.instance.id(),
766 "Resource is not owned by specified instance"
767 );
768 surface
769 .raw()
770 .supports_queue_family(&self.adapter.queue_families[family.index])
771 }
772
    /// Destroys `surface` after checking it belongs to this instance.
    ///
    /// # Panics
    ///
    /// Panics when `surface` was created from a different instance.
    pub fn destroy_surface(&mut self, surface: Surface<B>) {
        assert_eq!(
            surface.instance_id(),
            self.instance.id(),
            "Resource is not owned by specified instance"
        );
        // Dropping the surface is sufficient; the assert exists to catch
        // cross-instance misuse early.
        drop(surface);
    }
786
    /// Consumes `surface` and builds a swapchain render target with the
    /// requested extent, image count, present mode and usage.
    pub fn create_target(
        &self,
        surface: Surface<B>,
        extent: Extent2D,
        image_count: u32,
        present_mode: rendy_core::hal::window::PresentMode,
        usage: image::Usage,
    ) -> Result<Target<B>, SwapchainError> {
        profile_scope!("create_target");

        unsafe {
            surface.into_target(
                &self.adapter.physical_device,
                &self.device,
                extent,
                image_count,
                present_mode,
                usage,
            )
        }
    }
816
    /// Destroys a render target, returning the underlying surface for reuse.
    ///
    /// # Safety
    ///
    /// The target's images must no longer be in use by the device.
    pub unsafe fn destroy_target(&self, target: Target<B>) -> Surface<B> {
        target.dispose(&self.device)
    }
825
    /// Returns the logical device owned by this factory.
    pub fn device(&self) -> &Device<B> {
        &self.device
    }
830
    /// Returns the physical device (adapter) this factory was built on.
    pub fn physical(&self) -> &B::PhysicalDevice {
        &self.adapter.physical_device
    }
835
    /// Creates a raw backend semaphore.
    pub fn create_semaphore(&self) -> Result<B::Semaphore, OutOfMemory> {
        profile_scope!("create_semaphore");

        self.device.create_semaphore()
    }
842
    /// Destroys a semaphore created by `create_semaphore`.
    ///
    /// # Safety
    ///
    /// The semaphore must not be in use by pending submissions.
    pub unsafe fn destroy_semaphore(&self, semaphore: B::Semaphore) {
        self.device.destroy_semaphore(semaphore);
    }
851
    /// Creates a fence, optionally already in the signaled state.
    pub fn create_fence(&self, signaled: bool) -> Result<Fence<B>, OutOfMemory> {
        Fence::new(&self.device, signaled)
    }
856
    /// Resets a single fence to the unsignaled state.
    pub fn reset_fence(&self, fence: &mut Fence<B>) -> Result<(), OutOfMemory> {
        fence.reset(&self.device)
    }
861
    /// Resets a batch of fences in one device call.
    ///
    /// # Panics
    ///
    /// Panics when any fence belongs to another device or is not signaled.
    pub fn reset_fences<'a>(
        &self,
        fences: impl IntoIterator<Item = &'a mut (impl BorrowMut<Fence<B>> + 'a)>,
    ) -> Result<(), OutOfMemory> {
        // Validate every fence up front so the raw batch reset cannot
        // partially apply to an invalid set.
        let fences = fences
            .into_iter()
            .map(|f| {
                let f = f.borrow_mut();
                f.assert_device_owner(&self.device);
                assert!(f.is_signaled());
                f
            })
            .collect::<SmallVec<[_; 32]>>();
        unsafe {
            self.device.reset_fences(fences.iter().map(|f| f.raw()))?;
            // Keep the wrappers' bookkeeping in sync with the raw reset.
            fences.into_iter().for_each(|f| f.mark_reset());
        }
        Ok(())
    }
886
    /// Waits up to `timeout_ns` for `fence`; returns `Ok(true)` when it
    /// signaled and `Ok(false)` on timeout.
    ///
    /// On signal, advances the recorded epoch of the queue the fence was
    /// submitted on, which lets `cleanup` retire resources.
    pub fn wait_for_fence(
        &self,
        fence: &mut Fence<B>,
        timeout_ns: u64,
    ) -> Result<bool, OomOrDeviceLost> {
        profile_scope!("wait_for_fence");

        fence.assert_device_owner(&self.device);

        if let Some(fence_epoch) = fence.wait_signaled(&self.device, timeout_ns)? {
            // Bump the queue's completed-epoch marker (monotonic via `max`).
            let family_index = self.families_indices[fence_epoch.queue.family.index];
            let mut lock = self.epochs[family_index].write();
            let epoch = &mut lock[fence_epoch.queue.index];
            *epoch = max(*epoch, fence_epoch.epoch);

            Ok(true)
        } else {
            Ok(false)
        }
    }
909
    /// Waits up to `timeout_ns` for any/all of `fences` (per `wait_for`);
    /// returns `Ok(true)` when the wait condition was met, `Ok(false)` on
    /// timeout.
    ///
    /// For every fence observed signaled, advances the completed-epoch
    /// marker of its queue so `cleanup` can retire resources.
    pub fn wait_for_fences<'a>(
        &self,
        fences: impl IntoIterator<Item = &'a mut (impl BorrowMut<Fence<B>> + 'a)>,
        wait_for: WaitFor,
        timeout_ns: u64,
    ) -> Result<bool, OomOrDeviceLost> {
        profile_scope!("wait_for_fences");

        let fences = fences
            .into_iter()
            .map(|f| f.borrow_mut())
            .inspect(|f| f.assert_device_owner(&self.device))
            .collect::<SmallVec<[_; 32]>>();

        // Nothing to wait for: trivially satisfied.
        if fences.is_empty() {
            return Ok(true);
        }

        let timeout = !unsafe {
            self.device.wait_for_fences(
                fences.iter().map(|f| f.raw()),
                wait_for.clone(),
                timeout_ns,
            )
        }?;

        if timeout {
            return Ok(false);
        }

        // Lazily-taken write locks, one slot per family index, so each
        // family's epoch vector is locked at most once below.
        let mut epoch_locks = SmallVec::<[_; 32]>::new();
        for fence in &fences {
            let family_id = fence.epoch().queue.family;
            while family_id.index >= epoch_locks.len() {
                epoch_locks.push(None);
            }
        }

        match wait_for {
            WaitFor::Any => {
                // Only some fences may have signaled; check each one.
                for fence in fences {
                    if unsafe { self.device.get_fence_status(fence.raw()) }? {
                        let epoch = unsafe { fence.mark_signaled() };
                        let family_id = epoch.queue.family;
                        let family_index = *self
                            .families_indices
                            .get(family_id.index)
                            .expect("Valid family id expected");
                        let lock = epoch_locks[family_id.index]
                            .get_or_insert_with(|| self.epochs[family_index].write());
                        let queue_epoch = &mut lock[epoch.queue.index];
                        *queue_epoch = max(*queue_epoch, epoch.epoch);
                    }
                }
            }
            WaitFor::All => {
                // The wait succeeded, so every fence is signaled.
                for fence in fences {
                    let epoch = unsafe { fence.mark_signaled() };
                    let family_id = epoch.queue.family;
                    let family_index = *self
                        .families_indices
                        .get(family_id.index)
                        .expect("Valid family id expected");
                    let lock = epoch_locks[family_id.index]
                        .get_or_insert_with(|| self.epochs[family_index].write());
                    let queue_epoch = &mut lock[epoch.queue.index];
                    *queue_epoch = max(*queue_epoch, epoch.epoch);
                }
            }
        }
        Ok(true)
    }
984
    /// Destroys a fence created by `create_fence`.
    pub fn destroy_fence(&self, fence: Fence<B>) {
        unsafe { self.device.destroy_fence(fence.into_inner()) }
    }
993
    /// Creates a command pool for the given queue family with reset
    /// capability `R`.
    pub fn create_command_pool<R>(
        &self,
        family: &Family<B>,
    ) -> Result<CommandPool<B, QueueType, R>, OutOfMemory>
    where
        R: Reset,
    {
        profile_scope!("create_command_pool");

        family.create_pool(&self.device)
    }
1006
    /// Destroys a command pool created by `create_command_pool`.
    ///
    /// # Safety
    ///
    /// Buffers allocated from the pool must no longer be in use.
    pub unsafe fn destroy_command_pool<C, R>(&self, pool: CommandPool<B, C, R>)
    where
        R: Reset,
    {
        pool.dispose(&self.device);
    }
1018
1019 fn next_epochs(&mut self, families: &Families<B>) -> Epochs {
1020 Epochs {
1021 values: families
1022 .as_slice()
1023 .iter()
1024 .map(|f| f.as_slice().iter().map(|q| q.next_epoch()).collect())
1025 .collect(),
1026 }
1027 }
1028
1029 fn complete_epochs(&mut self) -> Epochs {
1030 Epochs {
1031 values: self
1032 .epochs
1033 .iter_mut()
1034 .map(|l| l.get_mut().iter().cloned().collect())
1035 .collect(),
1036 }
1037 }
1038
    /// Retires resources whose GPU work is proven complete: flushes the
    /// uploader/blitter bookkeeping, then lets the resource hub destroy
    /// anything safely past its epoch, then compacts the descriptor
    /// allocator.
    pub fn cleanup(&mut self, families: &Families<B>) {
        profile_scope!("cleanup");

        let next = self.next_epochs(families);
        let complete = self.complete_epochs();
        unsafe {
            self.uploader.cleanup(&self.device);
            self.blitter.cleanup(&self.device);
            self.resources.cleanup(
                &self.device,
                self.heaps.get_mut(),
                self.descriptor_allocator.get_mut(),
                next,
                complete,
            );

            // After sets are returned, let the allocator free empty pools.
            self.descriptor_allocator.get_mut().cleanup(&self.device);
        }
    }
1059
    /// Submits all pending uploads to their queues.
    pub fn flush_uploads(&mut self, families: &mut Families<B>) {
        unsafe { self.uploader.flush(families) }
    }
1064
    /// Submits all pending blits to their queues.
    pub fn flush_blits(&mut self, families: &mut Families<B>) {
        unsafe { self.blitter.flush(families) }
    }
1069
    /// Periodic maintenance: flush pending uploads and blits, then run
    /// resource cleanup. Intended to be called once per frame.
    pub fn maintain(&mut self, families: &mut Families<B>) {
        self.flush_uploads(families);
        self.flush_blits(families);
        self.cleanup(families);
    }
1076
    /// Creates a descriptor set layout NOT tracked by the factory.
    pub fn create_relevant_descriptor_set_layout(
        &self,
        bindings: Vec<DescriptorSetLayoutBinding>,
    ) -> Result<DescriptorSetLayout<B>, OutOfMemory> {
        unsafe { DescriptorSetLayout::create(&self.device, DescriptorSetInfo { bindings }) }
    }
1084
1085 pub fn create_descriptor_set_layout(
1087 &self,
1088 bindings: Vec<DescriptorSetLayoutBinding>,
1089 ) -> Result<Escape<DescriptorSetLayout<B>>, OutOfMemory> {
1090 let layout = self.create_relevant_descriptor_set_layout(bindings)?;
1091 Ok(self.resources.layouts.escape(layout))
1092 }
1093
    /// Allocates a descriptor set NOT tracked by the factory.
    pub fn create_relevant_descriptor_set(
        &self,
        layout: Handle<DescriptorSetLayout<B>>,
    ) -> Result<DescriptorSet<B>, OutOfMemory> {
        unsafe {
            DescriptorSet::create(&self.device, &mut self.descriptor_allocator.lock(), layout)
        }
    }
1104
1105 pub fn create_descriptor_set(
1107 &self,
1108 layout: Handle<DescriptorSetLayout<B>>,
1109 ) -> Result<Escape<DescriptorSet<B>>, OutOfMemory> {
1110 let set = self.create_relevant_descriptor_set(layout)?;
1111 Ok(self.resources.sets.escape(set))
1112 }
1113
    /// Allocates `count` factory-tracked descriptor sets for `layout` in a
    /// single allocator call, collected into any `FromIterator` container.
    pub fn create_descriptor_sets<T>(
        &self,
        layout: Handle<DescriptorSetLayout<B>>,
        count: u32,
    ) -> Result<T, OutOfMemory>
    where
        T: std::iter::FromIterator<Escape<DescriptorSet<B>>>,
    {
        profile_scope!("create_descriptor_sets");

        let mut result = SmallVec::<[_; 32]>::new();
        unsafe {
            DescriptorSet::create_many(
                &self.device,
                &mut self.descriptor_allocator.lock(),
                layout,
                count,
                &mut result,
            )
        }?;

        // Track every allocated set before handing them to the caller.
        Ok(result
            .into_iter()
            .map(|set| self.resources.sets.escape(set))
            .collect())
    }
1146
    /// Returns a snapshot of current memory utilization across all heaps.
    pub fn memory_utilization(&self) -> TotalMemoryUtilization {
        self.heaps.lock().utilization()
    }
1151
    /// Returns the id of the instance this factory's device belongs to.
    pub fn instance_id(&self) -> InstanceId {
        self.device.id().instance
    }
1156}
1157
impl<B> std::ops::Deref for Factory<B>
where
    B: Backend,
{
    type Target = Device<B>;

    /// Lets a `&Factory` be used wherever a `&Device` is expected.
    fn deref(&self) -> &Device<B> {
        &self.device
    }
}
1168
1169pub fn init_with_instance<B>(
1172 instance: Instance<B>,
1173 config: &Config<impl DevicesConfigure, impl HeapsConfigure, impl QueuesConfigure>,
1174) -> Result<(Factory<B>, Families<B>), CreationError>
1175where
1176 B: Backend,
1177{
1178 let (mut factory, families) = init_with_instance_ref(&instance, config)?;
1179 factory.instance = InstanceOrId::Instance(instance);
1180 Ok((factory, families))
1181}
1182
/// Builds a factory and its queue families from a borrowed instance.
///
/// The factory only stores the instance id, so it cannot create surfaces;
/// use `init_with_instance` for that. Picks a physical device via
/// `config.devices`, opens it with the queues from `config.queues`, and
/// sets up heaps from `config.heaps`.
pub fn init_with_instance_ref<B>(
    instance: &Instance<B>,
    config: &Config<impl DevicesConfigure, impl HeapsConfigure, impl QueuesConfigure>,
) -> Result<(Factory<B>, Families<B>), CreationError>
where
    B: Backend,
{
    rendy_with_slow_safety_checks!(
        log::warn!("Slow safety checks are enabled! Disable them in production by enabling the 'no-slow-safety-checks' feature!")
    );
    let mut adapters = instance.enumerate_adapters();

    if adapters.is_empty() {
        log::warn!("No physical devices found");
        return Err(rendy_core::hal::device::CreationError::InitializationFailed);
    }

    log::debug!(
        "Physical devices:\n{:#?}",
        adapters
            .iter()
            .map(|adapter| &adapter.info)
            .collect::<SmallVec<[_; 32]>>()
    );

    // Let the configuration choose an adapter; an out-of-range pick is a
    // configuration bug, hence the panic.
    let picked = config.devices.pick(&adapters);
    if picked >= adapters.len() {
        panic!("Physical device pick config returned index out of bound");
    }
    let adapter = adapters.swap_remove(picked);

    // Local helper purely for pretty-printing the picked device.
    #[derive(Debug)]
    struct PhysicalDeviceInfo<'a> {
        name: &'a str,
        features: Features,
        limits: Limits,
    }

    log::debug!(
        "Physical device picked: {:#?}",
        PhysicalDeviceInfo {
            name: &adapter.info.name,
            features: adapter.physical_device.features(),
            limits: adapter.physical_device.limits(),
        }
    );

    let device_id = DeviceId::new(instance.id());

    let (device, families) = {
        // Queue configuration: which families to open and with what
        // priorities.
        let families = config
            .queues
            .configure(device_id, &adapter.queue_families)
            .into_iter()
            .collect::<SmallVec<[_; 16]>>();
        // Split into the shape `open()` wants (family + priorities) and the
        // shape `families_from_device` wants (id + queue count).
        let (create_queues, get_queues): (SmallVec<[_; 32]>, SmallVec<[_; 32]>) = families
            .iter()
            .map(|(index, priorities)| {
                (
                    (&adapter.queue_families[index.index], priorities.as_ref()),
                    (*index, priorities.as_ref().len()),
                )
            })
            .unzip();

        log::debug!("Queues: {:#?}", get_queues);

        let Gpu {
            device,
            mut queue_groups,
        } = unsafe {
            adapter
                .physical_device
                .open(&create_queues, adapter.physical_device.features())
        }?;

        let families = unsafe {
            families_from_device(
                device_id,
                &mut queue_groups,
                get_queues,
                &adapter.queue_families,
            )
        };
        (device, families)
    };

    let device = Device::from_raw(device, device_id);

    // Memory configuration: heap sizes and memory-type policies.
    let (types, heaps) = config
        .heaps
        .configure(&adapter.physical_device.memory_properties());
    let heaps = heaps.into_iter().collect::<SmallVec<[_; 16]>>();
    let types = types.into_iter().collect::<SmallVec<[_; 32]>>();

    log::debug!("Heaps: {:#?}\nTypes: {:#?}", heaps, types);

    let heaps = unsafe { Heaps::new(types, heaps) };

    // One epoch counter per queue, grouped by family.
    let epochs = families
        .as_slice()
        .iter()
        .map(|f| parking_lot::RwLock::new(vec![0; f.as_slice().len()]))
        .collect();

    let factory = Factory {
        descriptor_allocator: ManuallyDrop::new(
            parking_lot::Mutex::new(DescriptorAllocator::new()),
        ),
        heaps: ManuallyDrop::new(parking_lot::Mutex::new(heaps)),
        resources: ManuallyDrop::new(ResourceHub::default()),
        uploader: unsafe { Uploader::new(&device, &families) }
            .map_err(rendy_core::hal::device::CreationError::OutOfMemory)?,
        blitter: unsafe { Blitter::new(&device, &families) }
            .map_err(rendy_core::hal::device::CreationError::OutOfMemory)?,
        families_indices: families.indices().into(),
        epochs,
        device,
        adapter,
        // Only the id is stored here; `init_with_instance` upgrades it.
        instance: InstanceOrId::Id(instance.id()),
    };

    Ok((factory, families))
}