// rendy_descriptor/allocator.rs

1use {
2    crate::ranges::*,
3    gfx_hal::{
4        device::{Device, OutOfMemory},
5        pso::{AllocationError, DescriptorPool as _, DescriptorPoolCreateFlags},
6        Backend,
7    },
8    smallvec::{smallvec, SmallVec},
9    std::{
10        collections::{HashMap, VecDeque},
11        ops::Deref,
12    },
13};
14
// Sizing bounds for newly created descriptor pools: a fresh pool holds at
// least MIN_SETS sets, and the history-based growth heuristic in
// `DescriptorBucket::new_pool_size` is capped at MAX_SETS.
const MIN_SETS: u32 = 64;
const MAX_SETS: u32 = 512;
17
/// Descriptor set from allocator.
#[derive(Debug)]
pub struct DescriptorSet<B: Backend> {
    // Raw backend descriptor set handle.
    raw: B::DescriptorSet,
    // Bucket-global id of the pool this set came from
    // (deque index + `pools_offset` at allocation time).
    pool: u64,
    // Descriptor counts of the layout used for allocation;
    // key used to find the owning bucket on free.
    ranges: DescriptorRanges,
}
25
impl<B> DescriptorSet<B>
where
    B: Backend,
{
    /// Get raw set
    pub fn raw(&self) -> &B::DescriptorSet {
        &self.raw
    }

    /// Get raw set
    /// It must not be replaced.
    ///
    /// # Safety
    ///
    /// The caller must not replace the returned handle with a different set:
    /// the `pool` and `ranges` recorded at allocation time must keep
    /// describing this exact set so it is returned to the right pool on free.
    pub unsafe fn raw_mut(&mut self) -> &mut B::DescriptorSet {
        &mut self.raw
    }
}
41
// Smart-pointer-style read access: a `DescriptorSet` can be used wherever
// a shared reference to the raw backend set is expected.
impl<B> Deref for DescriptorSet<B>
where
    B: Backend,
{
    type Target = B::DescriptorSet;

    fn deref(&self) -> &B::DescriptorSet {
        &self.raw
    }
}
52
// Scratch space for an in-flight allocation: the raw sets allocated so far
// and, in parallel, the bucket-global pool id each set came from
// (`pools[i]` is the source pool of `sets[i]`).
#[derive(Debug)]
struct Allocation<B: Backend> {
    sets: SmallVec<[B::DescriptorSet; 1]>,
    pools: Vec<u64>,
}
58
// One raw descriptor pool plus its bookkeeping counters.
#[derive(Debug)]
struct DescriptorPool<B: Backend> {
    raw: B::DescriptorPool,
    // Total number of sets this pool was created with.
    size: u32,

    // Number of free sets left.
    free: u32,

    // Number of sets freed (they can't be reused until gfx-hal 0.2)
    freed: u32,
}
70
71unsafe fn allocate_from_pool<B: Backend>(
72    raw: &mut B::DescriptorPool,
73    layout: &B::DescriptorSetLayout,
74    count: u32,
75    allocation: &mut SmallVec<[B::DescriptorSet; 1]>,
76) -> Result<(), OutOfMemory> {
77    let sets_were = allocation.len();
78    raw.allocate_sets(std::iter::repeat(layout).take(count as usize), allocation)
79        .map_err(|err| match err {
80            AllocationError::Host => OutOfMemory::Host,
81            AllocationError::Device => OutOfMemory::Device,
82            err => {
83                // We check pool for free descriptors and sets before calling this function,
84                // so it can't be exhausted.
85                // And it can't be fragmented either according to spec
86                //
87                // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html#VkDescriptorPoolCreateInfo
88                //
89                // """
90                // Additionally, if all sets allocated from the pool since it was created or most recently reset
91                // use the same number of descriptors (of each type) and the requested allocation also
92                // uses that same number of descriptors (of each type), then fragmentation must not cause an allocation failure
93                // """
94                panic!("Unexpected error: {:?}", err);
95            }
96        })?;
97    assert_eq!(allocation.len(), sets_were + count as usize);
98    Ok(())
99}
100
// All pools that serve one `DescriptorRanges` key. A pool's bucket-global
// id is its index in `pools` plus `pools_offset`; `pools_offset` counts
// pools already destroyed from the front of the deque, keeping ids of the
// surviving pools stable.
#[derive(Debug)]
struct DescriptorBucket<B: Backend> {
    pools_offset: u64,
    pools: VecDeque<DescriptorPool<B>>,
    // Number of sets currently allocated (and not yet freed) from this bucket.
    total: u64,
}
107
108impl<B> DescriptorBucket<B>
109where
110    B: Backend,
111{
112    fn new() -> Self {
113        DescriptorBucket {
114            pools_offset: 0,
115            pools: VecDeque::new(),
116            total: 0,
117        }
118    }
119
120    fn new_pool_size(&self, count: u32) -> u32 {
121        MIN_SETS // at least MIN_SETS
122            .max(count) // at least enough for allocation
123            .max(self.total.min(MAX_SETS as u64) as u32) // at least as much as was allocated so far capped to MAX_SETS
124            .next_power_of_two() // rounded up to nearest 2^N
125    }
126
127    unsafe fn dispose(mut self, device: &B::Device) {
128        if self.total > 0 {
129            log::error!("Not all descriptor sets were deallocated");
130        }
131
132        while let Some(pool) = self.pools.pop_front() {
133            assert!(pool.freed + pool.free <= pool.size);
134            if pool.freed + pool.free < pool.size {
135                log::error!(
136                    "Descriptor pool is still in use during allocator disposal. {:?}",
137                    pool
138                );
139            } else {
140                log::trace!("Destroying used up descriptor pool");
141                device.destroy_descriptor_pool(pool.raw);
142                self.pools_offset += 1;
143            }
144        }
145
146        self.pools
147            .drain(..)
148            .for_each(|pool| device.destroy_descriptor_pool(pool.raw));
149    }
150
151    unsafe fn allocate(
152        &mut self,
153        device: &B::Device,
154        layout: &B::DescriptorSetLayout,
155        layout_ranges: DescriptorRanges,
156        mut count: u32,
157        allocation: &mut Allocation<B>,
158    ) -> Result<(), OutOfMemory> {
159        if count == 0 {
160            return Ok(());
161        }
162
163        for (index, pool) in self.pools.iter_mut().enumerate().rev() {
164            if pool.free == 0 {
165                continue;
166            }
167
168            let allocate = pool.free.min(count);
169            log::trace!("Allocate {} from exising pool", allocate);
170            allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
171            allocation.pools.extend(
172                std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
173            );
174            count -= allocate;
175            pool.free -= allocate;
176            self.total += allocate as u64;
177
178            if count == 0 {
179                return Ok(());
180            }
181        }
182
183        while count > 0 {
184            let size = self.new_pool_size(count);
185            let pool_ranges = layout_ranges * size;
186            log::trace!(
187                "Create new pool with {} sets and {:?} descriptors",
188                size,
189                pool_ranges,
190            );
191            let raw = device.create_descriptor_pool(
192                size as usize,
193                &pool_ranges,
194                DescriptorPoolCreateFlags::empty(),
195            )?;
196            let allocate = size.min(count);
197
198            self.pools.push_back(DescriptorPool {
199                raw,
200                size,
201                free: size,
202                freed: 0,
203            });
204            let index = self.pools.len() - 1;
205            let pool = self.pools.back_mut().unwrap();
206
207            allocate_from_pool::<B>(&mut pool.raw, layout, allocate, &mut allocation.sets)?;
208            allocation.pools.extend(
209                std::iter::repeat(index as u64 + self.pools_offset).take(allocate as usize),
210            );
211
212            count -= allocate;
213            pool.free -= allocate;
214            self.total += allocate as u64;
215        }
216
217        Ok(())
218    }
219
220    unsafe fn free(&mut self, sets: impl IntoIterator<Item = B::DescriptorSet>, pool: u64) {
221        let pool = &mut self.pools[(pool - self.pools_offset) as usize];
222        let freed = sets.into_iter().count() as u32;
223        pool.freed += freed;
224        self.total -= freed as u64;
225        log::trace!("Freed {} from descriptor bucket", freed);
226    }
227
228    unsafe fn cleanup(&mut self, device: &B::Device) {
229        while let Some(pool) = self.pools.pop_front() {
230            if pool.freed < pool.size {
231                self.pools.push_front(pool);
232                break;
233            }
234            log::trace!("Destroying used up descriptor pool");
235            device.destroy_descriptor_pool(pool.raw);
236            self.pools_offset += 1;
237        }
238    }
239}
240
/// Descriptor allocator.
/// Can be used to allocate descriptor sets for any layout.
#[derive(Debug)]
pub struct DescriptorAllocator<B: Backend> {
    // One bucket of pools per distinct set of descriptor ranges.
    buckets: HashMap<DescriptorRanges, DescriptorBucket<B>>,
    // Reusable scratch buffers shared by all allocation calls.
    allocation: Allocation<B>,
    // Relevant token; disposed in `dispose`.
    relevant: relevant::Relevant,
    // NOTE(review): never read or written by the code visible here —
    // appears unused; confirm before removing.
    total: u64,
}
250
251impl<B> DescriptorAllocator<B>
252where
253    B: Backend,
254{
255    /// Create new allocator instance.
256    pub fn new() -> Self {
257        DescriptorAllocator {
258            buckets: HashMap::new(),
259            allocation: Allocation {
260                sets: SmallVec::new(),
261                pools: Vec::new(),
262            },
263            relevant: relevant::Relevant,
264            total: 0,
265        }
266    }
267
268    /// Destroy allocator instance.
269    /// All sets allocated from this allocator become invalid.
270    pub unsafe fn dispose(mut self, device: &B::Device) {
271        self.buckets
272            .drain()
273            .for_each(|(_, bucket)| bucket.dispose(device));
274        self.relevant.dispose();
275    }
276
277    /// Allocate descriptor set with specified layout.
278    /// `DescriptorRanges` must match descriptor numbers of the layout.
279    /// `DescriptorRanges` can be constructed [from bindings] that were used
280    /// to create layout instance.
281    ///
282    /// [from bindings]: .
283    pub unsafe fn allocate(
284        &mut self,
285        device: &B::Device,
286        layout: &B::DescriptorSetLayout,
287        layout_ranges: DescriptorRanges,
288        count: u32,
289        extend: &mut impl Extend<DescriptorSet<B>>,
290    ) -> Result<(), OutOfMemory> {
291        if count == 0 {
292            return Ok(());
293        }
294
295        log::trace!(
296            "Allocating {} sets with layout {:?} @ {:?}",
297            count,
298            layout,
299            layout_ranges
300        );
301
302        let bucket = self
303            .buckets
304            .entry(layout_ranges)
305            .or_insert_with(|| DescriptorBucket::new());
306        match bucket.allocate(device, layout, layout_ranges, count, &mut self.allocation) {
307            Ok(()) => {
308                extend.extend(
309                    Iterator::zip(
310                        self.allocation.pools.drain(..),
311                        self.allocation.sets.drain(),
312                    )
313                    .map(|(pool, set)| DescriptorSet {
314                        raw: set,
315                        ranges: layout_ranges,
316                        pool,
317                    }),
318                );
319                Ok(())
320            }
321            Err(err) => {
322                // Free sets allocated so far.
323                let mut last = None;
324                for (index, pool) in self.allocation.pools.drain(..).enumerate().rev() {
325                    match last {
326                        Some(last) if last == pool => {
327                            // same pool, continue
328                        }
329                        Some(last) => {
330                            let sets = &mut self.allocation.sets;
331                            // Free contiguous range of sets from one pool in one go.
332                            bucket.free((index + 1..sets.len()).map(|_| sets.pop().unwrap()), last);
333                        }
334                        None => last = Some(pool),
335                    }
336                }
337
338                if let Some(last) = last {
339                    bucket.free(self.allocation.sets.drain(), last);
340                }
341
342                Err(err)
343            }
344        }
345    }
346
347    /// Free descriptor sets.
348    ///
349    /// # Safety
350    ///
351    /// None of descriptor sets can be referenced in any pending command buffers.
352    /// All command buffers where at least one of descriptor sets referenced
353    /// move to invalid state.
354    pub unsafe fn free(&mut self, all_sets: impl IntoIterator<Item = DescriptorSet<B>>) {
355        let mut free: Option<(DescriptorRanges, u64, SmallVec<[B::DescriptorSet; 32]>)> = None;
356
357        // Collect contig
358        for set in all_sets {
359            match &mut free {
360                slot @ None => {
361                    slot.replace((set.ranges, set.pool, smallvec![set.raw]));
362                }
363                Some((ranges, pool, raw_sets)) if *ranges == set.ranges && *pool == set.pool => {
364                    raw_sets.push(set.raw);
365                }
366                Some((ranges, pool, raw_sets)) => {
367                    let bucket = self
368                        .buckets
369                        .get_mut(ranges)
370                        .expect("Set should be allocated from this allocator");
371                    debug_assert!(bucket.total >= raw_sets.len() as u64);
372
373                    bucket.free(raw_sets.drain(), *pool);
374                    *pool = set.pool;
375                    *ranges = set.ranges;
376                    raw_sets.push(set.raw);
377                }
378            }
379        }
380
381        if let Some((ranges, pool, raw_sets)) = free {
382            let bucket = self
383                .buckets
384                .get_mut(&ranges)
385                .expect("Set should be allocated from this allocator");
386            debug_assert!(bucket.total >= raw_sets.len() as u64);
387
388            bucket.free(raw_sets, pool);
389        }
390    }
391
392    /// Perform cleanup to allow resources reuse.
393    pub unsafe fn cleanup(&mut self, device: &B::Device) {
394        self.buckets
395            .values_mut()
396            .for_each(|bucket| bucket.cleanup(device));
397    }
398}