// wgpu-core: device/life.rs — device resource lifetime tracking.

1use crate::{
2    device::{
3        queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
4        DeviceError,
5    },
6    resource::{Buffer, Texture, Trackable},
7    snatch::SnatchGuard,
8    SubmissionIndex,
9};
10use smallvec::SmallVec;
11
12use std::sync::Arc;
13use thiserror::Error;
14
/// A command submitted to the GPU for execution.
///
/// ## Keeping resources alive while the GPU is using them
///
/// [`wgpu_hal`] requires that, when a command is submitted to a queue, all the
/// resources it uses must remain alive until it has finished executing.
///
/// [`wgpu_hal`]: hal
struct ActiveSubmission {
    /// The index of the submission we track.
    ///
    /// When `Device::fence`'s value is greater than or equal to this, our queue
    /// submission has completed.
    index: SubmissionIndex,

    /// Buffers to be mapped once this submission has completed.
    mapped: Vec<Arc<Buffer>>,

    /// Command buffers used by this submission, and the encoder that owns them.
    ///
    /// [`wgpu_hal::Queue::submit`] requires the submitted command buffers to
    /// remain alive until the submission has completed execution. Command
    /// encoders double as allocation pools for command buffers, so holding them
    /// here and cleaning them up in [`LifetimeTracker::triage_submissions`]
    /// satisfies that requirement.
    ///
    /// Once this submission has completed, the command buffers are reset and
    /// the command encoder is recycled.
    ///
    /// [`wgpu_hal::Queue::submit`]: hal::Queue::submit
    encoders: Vec<EncoderInFlight>,

    /// List of queue "on_submitted_work_done" closures to be called once this
    /// submission has completed.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}
52
53impl ActiveSubmission {
54    /// Returns true if this submission contains the given buffer.
55    ///
56    /// This only uses constant-time operations.
57    pub fn contains_buffer(&self, buffer: &Buffer) -> bool {
58        for encoder in &self.encoders {
59            // The ownership location of buffers depends on where the command encoder
60            // came from. If it is the staging command encoder on the queue, it is
61            // in the pending buffer list. If it came from a user command encoder,
62            // it is in the tracker.
63
64            if encoder.trackers.buffers.contains(buffer) {
65                return true;
66            }
67
68            if encoder
69                .pending_buffers
70                .contains_key(&buffer.tracker_index())
71            {
72                return true;
73            }
74        }
75
76        false
77    }
78
79    /// Returns true if this submission contains the given texture.
80    ///
81    /// This only uses constant-time operations.
82    pub fn contains_texture(&self, texture: &Texture) -> bool {
83        for encoder in &self.encoders {
84            // The ownership location of textures depends on where the command encoder
85            // came from. If it is the staging command encoder on the queue, it is
86            // in the pending buffer list. If it came from a user command encoder,
87            // it is in the tracker.
88
89            if encoder.trackers.textures.contains(texture) {
90                return true;
91            }
92
93            if encoder
94                .pending_textures
95                .contains_key(&texture.tracker_index())
96            {
97                return true;
98            }
99        }
100
101        false
102    }
103}
104
/// Error returned when waiting for the device/queue to become idle fails.
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum WaitIdleError {
    /// Wraps an underlying [`DeviceError`] raised while waiting.
    #[error(transparent)]
    Device(#[from] DeviceError),
    /// The requested submission index (first field) exceeds the last index
    /// returned by a successful submission (second field).
    #[error("Tried to wait using a submission index ({0}) that has not been returned by a successful submission (last successful submission: {1})")]
    WrongSubmissionIndex(SubmissionIndex, SubmissionIndex),
}
113
/// Resource tracking for a device.
///
/// ## Host mapping buffers
///
/// A buffer cannot be mapped until all active queue submissions that use it
/// have completed. To that end:
///
/// -   Each buffer's `ResourceInfo::submission_index` records the index of the
///     most recent queue submission that uses that buffer.
///
/// -   When the device is polled, the following `LifetimeTracker` methods decide
///     what should happen next:
///
///     1)  `triage_submissions` moves entries in `self.active[i]` for completed
///         submissions to `self.ready_to_map`.  At this point, both
///         `self.active` and `self.ready_to_map` are up to date with the given
///         submission index.
///
///     2)  `handle_mapping` drains `self.ready_to_map` and actually maps the
///         buffers, collecting a list of notification closures to call.
///
/// Only calling `Global::buffer_map_async` clones a new `Arc` for the
/// buffer. This new `Arc` is only dropped by `handle_mapping`.
pub(crate) struct LifetimeTracker {
    /// Resources used by queue submissions still in flight. One entry per
    /// submission, with older submissions appearing before younger.
    ///
    /// Entries are added by `track_submission` and drained by
    /// `LifetimeTracker::triage_submissions`. Lots of methods contribute data
    /// to particular entries.
    active: Vec<ActiveSubmission>,

    /// Buffers the user has asked us to map, and which are not used by any
    /// queue submission still in flight.
    ready_to_map: Vec<Arc<Buffer>>,

    /// Queue "on_submitted_work_done" closures that were registered while there
    /// were no pending submissions. These cannot be invoked immediately: they
    /// must fire only _after_ all previously registered buffer-map callbacks
    /// have fired, so we defer them here until the next time the device is
    /// maintained.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}
156
157impl LifetimeTracker {
158    pub fn new() -> Self {
159        Self {
160            active: Vec::new(),
161            ready_to_map: Vec::new(),
162            work_done_closures: SmallVec::new(),
163        }
164    }
165
166    /// Return true if there are no queue submissions still in flight.
167    pub fn queue_empty(&self) -> bool {
168        self.active.is_empty()
169    }
170
171    /// Start tracking resources associated with a new queue submission.
172    pub fn track_submission(&mut self, index: SubmissionIndex, encoders: Vec<EncoderInFlight>) {
173        self.active.push(ActiveSubmission {
174            index,
175            mapped: Vec::new(),
176            encoders,
177            work_done_closures: SmallVec::new(),
178        });
179    }
180
181    pub(crate) fn map(&mut self, buffer: &Arc<Buffer>) -> Option<SubmissionIndex> {
182        // Determine which buffers are ready to map, and which must wait for the GPU.
183        let submission = self
184            .active
185            .iter_mut()
186            .rev()
187            .find(|a| a.contains_buffer(buffer));
188
189        let maybe_submission_index = submission.as_ref().map(|s| s.index);
190
191        submission
192            .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
193            .push(buffer.clone());
194
195        maybe_submission_index
196    }
197
198    /// Returns the submission index of the most recent submission that uses the
199    /// given buffer.
200    pub fn get_buffer_latest_submission_index(&self, buffer: &Buffer) -> Option<SubmissionIndex> {
201        // We iterate in reverse order, so that we can bail out early as soon
202        // as we find a hit.
203        self.active.iter().rev().find_map(|submission| {
204            if submission.contains_buffer(buffer) {
205                Some(submission.index)
206            } else {
207                None
208            }
209        })
210    }
211
212    /// Returns the submission index of the most recent submission that uses the
213    /// given texture.
214    pub fn get_texture_latest_submission_index(
215        &self,
216        texture: &Texture,
217    ) -> Option<SubmissionIndex> {
218        // We iterate in reverse order, so that we can bail out early as soon
219        // as we find a hit.
220        self.active.iter().rev().find_map(|submission| {
221            if submission.contains_texture(texture) {
222                Some(submission.index)
223            } else {
224                None
225            }
226        })
227    }
228
229    /// Sort out the consequences of completed submissions.
230    ///
231    /// Assume that all submissions up through `last_done` have completed.
232    ///
233    /// -   Buffers used by those submissions are now ready to map, if requested.
234    ///     Add any buffers in the submission's [`mapped`] list to
235    ///     [`self.ready_to_map`], where [`LifetimeTracker::handle_mapping`]
236    ///     will find them.
237    ///
238    /// Return a list of [`SubmittedWorkDoneClosure`]s to run.
239    ///
240    /// [`mapped`]: ActiveSubmission::mapped
241    /// [`self.ready_to_map`]: LifetimeTracker::ready_to_map
242    /// [`SubmittedWorkDoneClosure`]: crate::device::queue::SubmittedWorkDoneClosure
243    #[must_use]
244    pub fn triage_submissions(
245        &mut self,
246        last_done: SubmissionIndex,
247    ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> {
248        profiling::scope!("triage_submissions");
249
250        //TODO: enable when `is_sorted_by_key` is stable
251        //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
252        let done_count = self
253            .active
254            .iter()
255            .position(|a| a.index > last_done)
256            .unwrap_or(self.active.len());
257
258        let mut work_done_closures: SmallVec<_> = self.work_done_closures.drain(..).collect();
259        for a in self.active.drain(..done_count) {
260            self.ready_to_map.extend(a.mapped);
261            for encoder in a.encoders {
262                // This involves actually decrementing the ref count of all command buffer
263                // resources, so can be _very_ expensive.
264                profiling::scope!("drop command buffer trackers");
265                drop(encoder);
266            }
267            work_done_closures.extend(a.work_done_closures);
268        }
269        work_done_closures
270    }
271
272    pub fn schedule_resource_destruction(
273        &mut self,
274        temp_resource: TempResource,
275        last_submit_index: SubmissionIndex,
276    ) {
277        let resources = self
278            .active
279            .iter_mut()
280            .find(|a| a.index == last_submit_index)
281            .map(|a| {
282                // Because this resource's `last_submit_index` matches `a.index`,
283                // we know that we must have done something with the resource,
284                // so `a.encoders` should not be empty.
285                &mut a.encoders.last_mut().unwrap().temp_resources
286            });
287        if let Some(resources) = resources {
288            resources.push(temp_resource);
289        }
290    }
291
292    pub fn add_work_done_closure(
293        &mut self,
294        closure: SubmittedWorkDoneClosure,
295    ) -> Option<SubmissionIndex> {
296        match self.active.last_mut() {
297            Some(active) => {
298                active.work_done_closures.push(closure);
299                Some(active.index)
300            }
301            // We must defer the closure until all previously occurring map_async closures
302            // have fired. This is required by the spec.
303            None => {
304                self.work_done_closures.push(closure);
305                None
306            }
307        }
308    }
309
310    /// Map the buffers in `self.ready_to_map`.
311    ///
312    /// Return a list of mapping notifications to send.
313    ///
314    /// See the documentation for [`LifetimeTracker`] for details.
315    #[must_use]
316    pub(crate) fn handle_mapping(
317        &mut self,
318        snatch_guard: &SnatchGuard,
319    ) -> Vec<super::BufferMapPendingClosure> {
320        if self.ready_to_map.is_empty() {
321            return Vec::new();
322        }
323        let mut pending_callbacks: Vec<super::BufferMapPendingClosure> =
324            Vec::with_capacity(self.ready_to_map.len());
325
326        for buffer in self.ready_to_map.drain(..) {
327            match buffer.map(snatch_guard) {
328                Some(cb) => pending_callbacks.push(cb),
329                None => continue,
330            }
331        }
332        pending_callbacks
333    }
334}