use crate::{
binding_model, command, conv,
device::life::WaitIdleError,
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Hub, Input, InvalidId, Storage, Token},
id,
init_tracker::{
BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
TextureInitTracker, TextureInitTrackerAction,
},
instance::{self, Adapter, Surface},
pipeline, present,
resource::{self, BufferAccessResult, BufferMapState, TextureViewNotRenderableReason},
resource::{BufferAccessError, BufferMapOperation},
track::{BindGroupStates, TextureSelector, Tracker},
validation::{self, check_buffer_usage, check_texture_usage},
FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored,
SubmissionIndex, DOWNLEVEL_ERROR_MESSAGE,
};
use arrayvec::ArrayVec;
use hal::{CommandEncoder as _, Device as _};
use parking_lot::{Mutex, MutexGuard};
use smallvec::SmallVec;
use thiserror::Error;
use wgt::{BufferAddress, TextureFormat, TextureSampleType, TextureViewDimension};
use std::{borrow::Cow, iter, mem, num::NonZeroU32, ops::Range, ptr};
mod life;
pub mod queue;
#[cfg(any(feature = "trace", feature = "replay"))]
pub mod trace;
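/// The number of distinct shader stages (vertex, fragment, compute).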
pub const SHADER_STAGE_COUNT: usize = 3;
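// 512 KiB of zeroes; the source buffer for zero-initializing buffers and textures.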
pub(crate) const ZERO_BUFFER_SIZE: BufferAddress = 512 << 10;
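// How long `Device::maintain` waits on the device fence before timing out, in milliseconds.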
const CLEANUP_WAIT_MS: u32 = 5000;
const IMPLICIT_FAILURE: &str = "failed implicit";
const EP_FAILURE: &str = "EP is invalid";
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
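/// Whether a host mapping of a buffer is for reading or writing.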
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum HostMap {
Read,
Write,
}
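/// The attachments of a render pass, generic over the per-attachment payload
/// (e.g. texture views, or just texture formats).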
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct AttachmentData<T> {
pub colors: ArrayVec<Option<T>, { hal::MAX_COLOR_ATTACHMENTS }>,
pub resolves: ArrayVec<T, { hal::MAX_COLOR_ATTACHMENTS }>,
pub depth_stencil: Option<T>,
}
impl<T: PartialEq> Eq for AttachmentData<T> {}
impl<T> AttachmentData<T> {
pub(crate) fn map<U, F: Fn(&T) -> U>(&self, fun: F) -> AttachmentData<U> {
AttachmentData {
colors: self.colors.iter().map(|c| c.as_ref().map(&fun)).collect(),
resolves: self.resolves.iter().map(&fun).collect(),
depth_stencil: self.depth_stencil.as_ref().map(&fun),
}
}
}
#[derive(Debug, Copy, Clone)]
pub enum RenderPassCompatibilityCheckType {
RenderPipeline,
RenderBundle,
}
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub(crate) struct RenderPassContext {
pub attachments: AttachmentData<TextureFormat>,
pub sample_count: u32,
pub multiview: Option<NonZeroU32>,
}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum RenderPassCompatibilityError {
#[error(
"Incompatible color attachments at indices {indices:?}: the RenderPass uses textures with formats {expected:?} but the {ty:?} uses attachments with formats {actual:?}",
)]
IncompatibleColorAttachment {
indices: Vec<usize>,
expected: Vec<Option<TextureFormat>>,
actual: Vec<Option<TextureFormat>>,
ty: RenderPassCompatibilityCheckType,
},
#[error(
"Incompatible depth-stencil attachment format: the RenderPass uses a texture with format {expected:?} but the {ty:?} uses an attachment with format {actual:?}",
)]
IncompatibleDepthStencilAttachment {
expected: Option<TextureFormat>,
actual: Option<TextureFormat>,
ty: RenderPassCompatibilityCheckType,
},
#[error(
"Incompatible sample count: the RenderPass uses textures with sample count {expected:?} but the {ty:?} uses attachments with format {actual:?}",
)]
IncompatibleSampleCount {
expected: u32,
actual: u32,
ty: RenderPassCompatibilityCheckType,
},
#[error("Incompatible multiview setting: the RenderPass uses setting {expected:?} but the {ty:?} uses setting {actual:?}")]
IncompatibleMultiview {
expected: Option<NonZeroU32>,
actual: Option<NonZeroU32>,
ty: RenderPassCompatibilityCheckType,
},
}
impl RenderPassContext {
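    /// Check that `other` (the context a render pipeline or render bundle was
    /// created with) is compatible with this render pass's context.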
pub(crate) fn check_compatible(
&self,
other: &Self,
ty: RenderPassCompatibilityCheckType,
) -> Result<(), RenderPassCompatibilityError> {
if self.attachments.colors != other.attachments.colors {
let indices = self
.attachments
.colors
.iter()
.zip(&other.attachments.colors)
.enumerate()
.filter_map(|(idx, (left, right))| (left != right).then_some(idx))
.collect();
return Err(RenderPassCompatibilityError::IncompatibleColorAttachment {
indices,
expected: self.attachments.colors.iter().cloned().collect(),
actual: other.attachments.colors.iter().cloned().collect(),
ty,
});
}
if self.attachments.depth_stencil != other.attachments.depth_stencil {
return Err(
RenderPassCompatibilityError::IncompatibleDepthStencilAttachment {
expected: self.attachments.depth_stencil,
actual: other.attachments.depth_stencil,
ty,
},
);
}
if self.sample_count != other.sample_count {
return Err(RenderPassCompatibilityError::IncompatibleSampleCount {
expected: self.sample_count,
actual: other.sample_count,
ty,
});
}
if self.multiview != other.multiview {
return Err(RenderPassCompatibilityError::IncompatibleMultiview {
expected: self.multiview,
actual: other.multiview,
ty,
});
}
Ok(())
}
}
pub type BufferMapPendingClosure = (BufferMapOperation, BufferAccessResult);
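/// User callbacks collected during device maintenance: buffer-map results and
/// submitted-work-done notifications. They are returned to the caller and
/// fired once internal locks have been released.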
#[derive(Default)]
pub struct UserClosures {
pub mappings: Vec<BufferMapPendingClosure>,
pub submissions: SmallVec<[queue::SubmittedWorkDoneClosure; 1]>,
}
impl UserClosures {
fn extend(&mut self, other: Self) {
self.mappings.extend(other.mappings);
self.submissions.extend(other.submissions);
}
fn fire(self) {
for (operation, status) in self.mappings {
operation.callback.call(status);
}
for closure in self.submissions {
closure.call();
}
}
}
fn map_buffer<A: hal::Api>(
raw: &A::Device,
buffer: &mut resource::Buffer<A>,
offset: BufferAddress,
size: BufferAddress,
kind: HostMap,
) -> Result<ptr::NonNull<u8>, BufferAccessError> {
let mapping = unsafe {
raw.map_buffer(buffer.raw.as_ref().unwrap(), offset..offset + size)
.map_err(DeviceError::from)?
};
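    // Handle non-coherent memory: invalidate caches now for reads, and record
    // the written range for writes so it can be flushed on unmap.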
buffer.sync_mapped_writes = match kind {
HostMap::Read if !mapping.is_coherent => unsafe {
raw.invalidate_mapped_ranges(
buffer.raw.as_ref().unwrap(),
iter::once(offset..offset + size),
);
None
},
HostMap::Write if !mapping.is_coherent => Some(offset..offset + size),
_ => None,
};
assert_eq!(offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
assert_eq!(size % wgt::COPY_BUFFER_ALIGNMENT, 0);
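    // Buffers must behave as if zero-initialized, so zero out any ranges of
    // the mapped region that have not been initialized yet; `drain` also
    // marks those ranges as initialized.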
let zero_init_needs_flush_now = mapping.is_coherent && buffer.sync_mapped_writes.is_none();
let mapped = unsafe { std::slice::from_raw_parts_mut(mapping.ptr.as_ptr(), size as usize) };
for uninitialized in buffer.initialization_status.drain(offset..(size + offset)) {
let fill_range =
(uninitialized.start - offset) as usize..(uninitialized.end - offset) as usize;
mapped[fill_range].fill(0);
if zero_init_needs_flush_now {
unsafe {
raw.flush_mapped_ranges(buffer.raw.as_ref().unwrap(), iter::once(uninitialized))
};
}
}
Ok(mapping.ptr)
}
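/// A simple pool of free command encoders, recycled after their submissions
/// complete instead of being destroyed and recreated.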
struct CommandAllocator<A: hal::Api> {
free_encoders: Vec<A::CommandEncoder>,
}
impl<A: hal::Api> CommandAllocator<A> {
fn acquire_encoder(
&mut self,
device: &A::Device,
queue: &A::Queue,
) -> Result<A::CommandEncoder, hal::DeviceError> {
match self.free_encoders.pop() {
Some(encoder) => Ok(encoder),
None => unsafe {
let hal_desc = hal::CommandEncoderDescriptor { label: None, queue };
device.create_command_encoder(&hal_desc)
},
}
}
fn release_encoder(&mut self, encoder: A::CommandEncoder) {
self.free_encoders.push(encoder);
}
fn dispose(self, device: &A::Device) {
log::info!("Destroying {} command encoders", self.free_encoders.len());
for cmd_encoder in self.free_encoders {
unsafe {
device.destroy_command_encoder(cmd_encoder);
}
}
}
}
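/// Structure describing a logical device. Some members are internally mutable,
/// stored behind mutexes.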
pub struct Device<A: HalApi> {
pub(crate) raw: A::Device,
pub(crate) adapter_id: Stored<id::AdapterId>,
pub(crate) queue: A::Queue,
pub(crate) zero_buffer: A::Buffer,
pub(crate) life_guard: LifeGuard,
ref_count: RefCount,
command_allocator: Mutex<CommandAllocator<A>>,
pub(crate) active_submission_index: SubmissionIndex,
fence: A::Fence,
pub(crate) trackers: Mutex<Tracker<A>>,
life_tracker: Mutex<life::LifetimeTracker<A>>,
temp_suspected: life::SuspectedResources,
pub(crate) alignments: hal::Alignments,
pub(crate) limits: wgt::Limits,
pub(crate) features: wgt::Features,
pub(crate) downlevel: wgt::DownlevelCapabilities,
pending_writes: queue::PendingWrites<A>,
#[cfg(feature = "trace")]
pub(crate) trace: Option<Mutex<trace::Trace>>,
}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateDeviceError {
#[error("Not enough memory left")]
OutOfMemory,
#[error("Failed to create internal buffer for initializing textures")]
FailedToCreateZeroBuffer(#[from] DeviceError),
}
impl<A: HalApi> Device<A> {
pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
if self.features.contains(feature) {
Ok(())
} else {
Err(MissingFeatures(feature))
}
}
pub(crate) fn require_downlevel_flags(
&self,
flags: wgt::DownlevelFlags,
) -> Result<(), MissingDownlevelFlags> {
if self.downlevel.flags.contains(flags) {
Ok(())
} else {
Err(MissingDownlevelFlags(flags))
}
}
}
impl<A: HalApi> Device<A> {
pub(crate) fn new(
open: hal::OpenDevice<A>,
adapter_id: Stored<id::AdapterId>,
alignments: hal::Alignments,
downlevel: wgt::DownlevelCapabilities,
desc: &DeviceDescriptor,
trace_path: Option<&std::path::Path>,
) -> Result<Self, CreateDeviceError> {
#[cfg(not(feature = "trace"))]
        if trace_path.is_some() {
log::error!("Feature 'trace' is not enabled");
}
let fence =
unsafe { open.device.create_fence() }.map_err(|_| CreateDeviceError::OutOfMemory)?;
let mut com_alloc = CommandAllocator {
free_encoders: Vec::new(),
};
let pending_encoder = com_alloc
.acquire_encoder(&open.device, &open.queue)
.map_err(|_| CreateDeviceError::OutOfMemory)?;
let mut pending_writes = queue::PendingWrites::<A>::new(pending_encoder);
let zero_buffer = unsafe {
open.device
.create_buffer(&hal::BufferDescriptor {
label: Some("(wgpu internal) zero init buffer"),
size: ZERO_BUFFER_SIZE,
usage: hal::BufferUses::COPY_SRC | hal::BufferUses::COPY_DST,
memory_flags: hal::MemoryFlags::empty(),
})
.map_err(DeviceError::from)?
};
pending_writes.activate();
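        // Record the commands that zero-initialize `zero_buffer` and leave it
        // in COPY_SRC state, ready to serve as the source for clears.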
unsafe {
pending_writes
.command_encoder
.transition_buffers(iter::once(hal::BufferBarrier {
buffer: &zero_buffer,
usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
}));
pending_writes
.command_encoder
.clear_buffer(&zero_buffer, 0..ZERO_BUFFER_SIZE);
pending_writes
.command_encoder
.transition_buffers(iter::once(hal::BufferBarrier {
buffer: &zero_buffer,
usage: hal::BufferUses::COPY_DST..hal::BufferUses::COPY_SRC,
}));
}
let life_guard = LifeGuard::new("<device>");
let ref_count = life_guard.add_ref();
Ok(Self {
raw: open.device,
adapter_id,
queue: open.queue,
zero_buffer,
life_guard,
ref_count,
command_allocator: Mutex::new(com_alloc),
active_submission_index: 0,
fence,
trackers: Mutex::new(Tracker::new()),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: life::SuspectedResources::default(),
#[cfg(feature = "trace")]
trace: trace_path.and_then(|path| match trace::Trace::new(path) {
Ok(mut trace) => {
trace.add(trace::Action::Init {
desc: desc.clone(),
backend: A::VARIANT,
});
Some(Mutex::new(trace))
}
Err(e) => {
log::error!("Unable to start a trace in '{:?}': {:?}", path, e);
None
}
}),
alignments,
limits: desc.limits.clone(),
features: desc.features,
downlevel,
pending_writes,
})
}
fn lock_life<'this, 'token: 'this>(
&'this self,
_token: &mut Token<'token, Self>,
) -> MutexGuard<'this, life::LifetimeTracker<A>> {
self.life_tracker.lock()
}
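    /// Check the current fence value, process completed GPU work, and collect
    /// the user callbacks that became ready. Returns the closures to fire
    /// (once locks are released) and whether the device queue is now empty.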
fn maintain<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this self,
hub: &Hub<A, G>,
maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
token: &mut Token<'token, Self>,
) -> Result<(UserClosures, bool), WaitIdleError> {
profiling::scope!("Device::maintain");
let mut life_tracker = self.lock_life(token);
life_tracker
.suspected_resources
.extend(&self.temp_suspected);
life_tracker.triage_suspected(
hub,
&self.trackers,
#[cfg(feature = "trace")]
self.trace.as_ref(),
token,
);
life_tracker.triage_mapped(hub, token);
let last_done_index = if maintain.is_wait() {
let index_to_wait_for = match maintain {
wgt::Maintain::WaitForSubmissionIndex(submission_index) => {
submission_index.index
}
_ => self.active_submission_index,
};
unsafe {
self.raw
.wait(&self.fence, index_to_wait_for, CLEANUP_WAIT_MS)
.map_err(DeviceError::from)?
};
index_to_wait_for
} else {
unsafe {
self.raw
.get_fence_value(&self.fence)
.map_err(DeviceError::from)?
}
};
let submission_closures =
life_tracker.triage_submissions(last_done_index, &self.command_allocator);
let mapping_closures = life_tracker.handle_mapping(hub, &self.raw, &self.trackers, token);
life_tracker.cleanup(&self.raw);
let closures = UserClosures {
mappings: mapping_closures,
submissions: submission_closures,
};
Ok((closures, life_tracker.queue_empty()))
}
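    /// Walk the given trackers and mark every resource whose user-side handle
    /// has been dropped as suspected, so the lifetime tracker can destroy it
    /// once the GPU is done with it.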
fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this mut self,
hub: &Hub<A, G>,
trackers: &Tracker<A>,
token: &mut Token<'token, Self>,
) {
self.temp_suspected.clear();
{
let (bind_group_guard, mut token) = hub.bind_groups.read(token);
let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token);
let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token);
for id in trackers.buffers.used() {
if buffer_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.buffers.push(id);
}
}
for id in trackers.textures.used() {
if texture_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.textures.push(id);
}
}
for id in trackers.views.used() {
if texture_view_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.texture_views.push(id);
}
}
for id in trackers.bind_groups.used() {
if bind_group_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.bind_groups.push(id);
}
}
for id in trackers.samplers.used() {
if sampler_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.samplers.push(id);
}
}
for id in trackers.compute_pipelines.used() {
if compute_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.compute_pipelines.push(id);
}
}
for id in trackers.render_pipelines.used() {
if render_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.render_pipelines.push(id);
}
}
for id in trackers.query_sets.used() {
if query_set_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.query_sets.push(id);
}
}
}
self.lock_life(token)
.suspected_resources
.extend(&self.temp_suspected);
self.temp_suspected.clear();
}
fn create_buffer(
&self,
self_id: id::DeviceId,
desc: &resource::BufferDescriptor,
transient: bool,
) -> Result<resource::Buffer<A>, resource::CreateBufferError> {
debug_assert_eq!(self_id.backend(), A::VARIANT);
if desc.size > self.limits.max_buffer_size {
return Err(resource::CreateBufferError::MaxBufferSize {
requested: desc.size,
maximum: self.limits.max_buffer_size,
});
}
if desc.usage.contains(wgt::BufferUsages::INDEX)
&& desc.usage.contains(
wgt::BufferUsages::VERTEX
| wgt::BufferUsages::UNIFORM
| wgt::BufferUsages::INDIRECT
| wgt::BufferUsages::STORAGE,
)
{
self.require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER)?;
}
        if desc.usage.is_empty() || desc.usage.contains_invalid_bits() {
            return Err(resource::CreateBufferError::InvalidUsage(desc.usage));
        }
        let mut usage = conv::map_buffer_usage(desc.usage);
if !self
.features
.contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS)
{
use wgt::BufferUsages as Bu;
let write_mismatch = desc.usage.contains(Bu::MAP_WRITE)
&& !(Bu::MAP_WRITE | Bu::COPY_SRC).contains(desc.usage);
let read_mismatch = desc.usage.contains(Bu::MAP_READ)
&& !(Bu::MAP_READ | Bu::COPY_DST).contains(desc.usage);
if write_mismatch || read_mismatch {
return Err(resource::CreateBufferError::UsageMismatch(desc.usage));
}
}
if desc.mapped_at_creation {
if desc.size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(resource::CreateBufferError::UnalignedSize);
}
if !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
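                // The mapped contents will be written through an internal
                // staging copy, which needs COPY_DST on the buffer.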
usage |= hal::BufferUses::COPY_DST;
}
} else {
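            // All buffer memory must appear zero-initialized; the deferred
            // `clear_buffer` used for that requires COPY_DST.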
usage |= hal::BufferUses::COPY_DST;
}
let actual_size = if desc.size == 0 {
wgt::COPY_BUFFER_ALIGNMENT
} else if desc.usage.contains(wgt::BufferUsages::VERTEX) {
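            // Pad vertex buffers by one byte so that binding an empty range at
            // the very end of the buffer remains valid on all backends.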
desc.size + 1
} else {
desc.size
};
let clear_remainder = actual_size % wgt::COPY_BUFFER_ALIGNMENT;
let aligned_size = if clear_remainder != 0 {
actual_size + wgt::COPY_BUFFER_ALIGNMENT - clear_remainder
} else {
actual_size
};
let mut memory_flags = hal::MemoryFlags::empty();
memory_flags.set(hal::MemoryFlags::TRANSIENT, transient);
let hal_desc = hal::BufferDescriptor {
label: desc.label.borrow_option(),
size: aligned_size,
usage,
memory_flags,
};
let buffer = unsafe { self.raw.create_buffer(&hal_desc) }.map_err(DeviceError::from)?;
Ok(resource::Buffer {
raw: Some(buffer),
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
usage: desc.usage,
size: desc.size,
initialization_status: BufferInitTracker::new(desc.size),
sync_mapped_writes: None,
map_state: resource::BufferMapState::Idle,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
})
}
fn create_texture_from_hal(
&self,
hal_texture: A::Texture,
hal_usage: hal::TextureUses,
self_id: id::DeviceId,
desc: &resource::TextureDescriptor,
format_features: wgt::TextureFormatFeatures,
clear_mode: resource::TextureClearMode<A>,
) -> resource::Texture<A> {
debug_assert_eq!(self_id.backend(), A::VARIANT);
resource::Texture {
inner: resource::TextureInner::Native {
raw: Some(hal_texture),
},
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
desc: desc.map_label(|_| ()),
hal_usage,
format_features,
initialization_status: TextureInitTracker::new(
desc.mip_level_count,
desc.array_layer_count(),
),
full_range: TextureSelector {
mips: 0..desc.mip_level_count,
layers: 0..desc.array_layer_count(),
},
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
clear_mode,
}
}
fn create_texture(
&self,
self_id: id::DeviceId,
adapter: &Adapter<A>,
desc: &resource::TextureDescriptor,
) -> Result<resource::Texture<A>, resource::CreateTextureError> {
use resource::{CreateTextureError, TextureDimensionError};
if desc.usage.is_empty() || desc.usage.contains_invalid_bits() {
return Err(CreateTextureError::InvalidUsage(desc.usage));
}
conv::check_texture_dimension_size(
desc.dimension,
desc.size,
desc.sample_count,
&self.limits,
)?;
if desc.dimension != wgt::TextureDimension::D2 {
if desc.format.is_depth_stencil_format() {
return Err(CreateTextureError::InvalidDepthDimension(
desc.dimension,
desc.format,
));
}
if desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::InvalidDimensionUsages(
wgt::TextureUsages::RENDER_ATTACHMENT,
desc.dimension,
));
}
if desc.format.is_compressed() {
return Err(CreateTextureError::InvalidCompressedDimension(
desc.dimension,
desc.format,
));
}
}
if desc.format.is_compressed() {
let (block_width, block_height) = desc.format.block_dimensions();
if desc.size.width % block_width != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockWidth {
width: desc.size.width,
block_width,
format: desc.format,
},
));
}
if desc.size.height % block_height != 0 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::NotMultipleOfBlockHeight {
height: desc.size.height,
block_height,
format: desc.format,
},
));
}
}
let format_features = self
.describe_format_features(adapter, desc.format)
.map_err(|error| CreateTextureError::MissingFeatures(desc.format, error))?;
if desc.sample_count > 1 {
if desc.mip_level_count != 1 {
return Err(CreateTextureError::InvalidMipLevelCount {
requested: desc.mip_level_count,
maximum: 1,
});
}
if desc.size.depth_or_array_layers != 1 {
return Err(CreateTextureError::InvalidDimension(
TextureDimensionError::MultisampledDepthOrArrayLayer(
desc.size.depth_or_array_layers,
),
));
}
if desc.usage.contains(wgt::TextureUsages::STORAGE_BINDING) {
return Err(CreateTextureError::InvalidMultisampledStorageBinding);
}
if !desc.usage.contains(wgt::TextureUsages::RENDER_ATTACHMENT) {
return Err(CreateTextureError::MultisampledNotRenderAttachment);
}
if !format_features.flags.intersects(
wgt::TextureFormatFeatureFlags::MULTISAMPLE_X4
| wgt::TextureFormatFeatureFlags::MULTISAMPLE_X2
| wgt::TextureFormatFeatureFlags::MULTISAMPLE_X8
| wgt::TextureFormatFeatureFlags::MULTISAMPLE_X16,
) {
return Err(CreateTextureError::InvalidMultisampledFormat(desc.format));
}
if !format_features
.flags
.sample_count_supported(desc.sample_count)
{
return Err(CreateTextureError::InvalidSampleCount(
desc.sample_count,
desc.format,
));
};
}
let mips = desc.mip_level_count;
let max_levels_allowed = desc.size.max_mips(desc.dimension).min(hal::MAX_MIP_LEVELS);
if mips == 0 || mips > max_levels_allowed {
return Err(CreateTextureError::InvalidMipLevelCount {
requested: mips,
maximum: max_levels_allowed,
});
}
let missing_allowed_usages = desc.usage - format_features.allowed_usages;
if !missing_allowed_usages.is_empty() {
let wgpu_allowed_usages = desc
.format
.guaranteed_format_features(self.features)
.allowed_usages;
let wgpu_missing_usages = desc.usage - wgpu_allowed_usages;
return Err(CreateTextureError::InvalidFormatUsages(
missing_allowed_usages,
desc.format,
wgpu_missing_usages.is_empty(),
));
}
let mut hal_view_formats = vec![];
for format in desc.view_formats.iter() {
if desc.format == *format {
continue;
}
if desc.format.remove_srgb_suffix() != format.remove_srgb_suffix() {
return Err(CreateTextureError::InvalidViewFormat(*format, desc.format));
}
hal_view_formats.push(*format);
}
if !hal_view_formats.is_empty() {
self.require_downlevel_flags(wgt::DownlevelFlags::VIEW_FORMATS)?;
}
        let hal_usage = conv::map_texture_usage(desc.usage, desc.format.into())
            | if desc.format.is_depth_stencil_format() {
                hal::TextureUses::DEPTH_STENCIL_WRITE
            } else if desc.usage.contains(wgt::TextureUsages::COPY_DST) {
                hal::TextureUses::COPY_DST
            } else if format_features
                .allowed_usages
                .contains(wgt::TextureUsages::RENDER_ATTACHMENT)
                && desc.dimension == wgt::TextureDimension::D2
            {
                hal::TextureUses::COLOR_TARGET
            } else {
                hal::TextureUses::COPY_DST
            };
let hal_desc = hal::TextureDescriptor {
label: desc.label.borrow_option(),
size: desc.size,
mip_level_count: desc.mip_level_count,
sample_count: desc.sample_count,
dimension: desc.dimension,
format: desc.format,
usage: hal_usage,
memory_flags: hal::MemoryFlags::empty(),
view_formats: hal_view_formats,
};
let raw_texture = unsafe {
self.raw
.create_texture(&hal_desc)
.map_err(DeviceError::from)?
};
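        // Decide how this texture will be zero-initialized on first use: clear
        // via a render pass when it can serve as an attachment, otherwise fall
        // back to copies from the zero buffer.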
let clear_mode = if hal_usage
.intersects(hal::TextureUses::DEPTH_STENCIL_WRITE | hal::TextureUses::COLOR_TARGET)
{
let (is_color, usage) = if desc.format.is_depth_stencil_format() {
(false, hal::TextureUses::DEPTH_STENCIL_WRITE)
} else {
(true, hal::TextureUses::COLOR_TARGET)
};
let dimension = match desc.dimension {
wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1,
wgt::TextureDimension::D2 => wgt::TextureViewDimension::D2,
wgt::TextureDimension::D3 => unreachable!(),
};
let mut clear_views = SmallVec::new();
for mip_level in 0..desc.mip_level_count {
for array_layer in 0..desc.size.depth_or_array_layers {
let desc = hal::TextureViewDescriptor {
label: Some("(wgpu internal) clear texture view"),
format: desc.format,
dimension,
usage,
range: wgt::ImageSubresourceRange {
aspect: wgt::TextureAspect::All,
base_mip_level: mip_level,
mip_level_count: Some(1),
base_array_layer: array_layer,
array_layer_count: Some(1),
},
};
clear_views.push(
unsafe { self.raw.create_texture_view(&raw_texture, &desc) }
.map_err(DeviceError::from)?,
);
}
}
resource::TextureClearMode::RenderPass {
clear_views,
is_color,
}
} else {
resource::TextureClearMode::BufferCopy
};
        Ok(self.create_texture_from_hal(
            raw_texture,
            hal_usage,
            self_id,
            desc,
            format_features,
            clear_mode,
        ))
}
fn create_texture_view(
&self,
texture: &resource::Texture<A>,
texture_id: id::TextureId,
desc: &resource::TextureViewDescriptor,
) -> Result<resource::TextureView<A>, resource::CreateTextureViewError> {
let texture_raw = texture
.inner
.as_raw()
.ok_or(resource::CreateTextureViewError::InvalidTexture)?;
let resolved_format = desc.format.unwrap_or_else(|| {
texture
.desc
.format
.aspect_specific_format(desc.range.aspect)
.unwrap_or(texture.desc.format)
});
let resolved_dimension = desc
.dimension
.unwrap_or_else(|| match texture.desc.dimension {
wgt::TextureDimension::D1 => wgt::TextureViewDimension::D1,
wgt::TextureDimension::D2 => {
if texture.desc.array_layer_count() == 1 {
wgt::TextureViewDimension::D2
} else {
wgt::TextureViewDimension::D2Array
}
}
wgt::TextureDimension::D3 => wgt::TextureViewDimension::D3,
});
let resolved_mip_level_count = desc.range.mip_level_count.unwrap_or_else(|| {
texture
.desc
.mip_level_count
.saturating_sub(desc.range.base_mip_level)
});
let resolved_array_layer_count =
desc.range
.array_layer_count
.unwrap_or_else(|| match resolved_dimension {
wgt::TextureViewDimension::D1
| wgt::TextureViewDimension::D2
| wgt::TextureViewDimension::D3 => 1,
wgt::TextureViewDimension::Cube => 6,
wgt::TextureViewDimension::D2Array | wgt::TextureViewDimension::CubeArray => {
texture
.desc
.array_layer_count()
.saturating_sub(desc.range.base_array_layer)
}
});
let aspects = hal::FormatAspects::new(texture.desc.format, desc.range.aspect);
if aspects.is_empty() {
return Err(resource::CreateTextureViewError::InvalidAspect {
texture_format: texture.desc.format,
requested_aspect: desc.range.aspect,
});
}
let format_is_good = if desc.range.aspect == wgt::TextureAspect::All {
resolved_format == texture.desc.format
|| texture.desc.view_formats.contains(&resolved_format)
} else {
Some(resolved_format)
== texture
.desc
.format
.aspect_specific_format(desc.range.aspect)
};
if !format_is_good {
return Err(resource::CreateTextureViewError::FormatReinterpretation {
texture: texture.desc.format,
view: resolved_format,
});
}
if texture.desc.sample_count > 1 && resolved_dimension != wgt::TextureViewDimension::D2 {
return Err(
resource::CreateTextureViewError::InvalidMultisampledTextureViewDimension(
resolved_dimension,
),
);
}
if texture.desc.dimension != resolved_dimension.compatible_texture_dimension() {
return Err(
resource::CreateTextureViewError::InvalidTextureViewDimension {
view: resolved_dimension,
texture: texture.desc.dimension,
},
);
}
match resolved_dimension {
TextureViewDimension::D1 | TextureViewDimension::D2 | TextureViewDimension::D3 => {
if resolved_array_layer_count != 1 {
return Err(resource::CreateTextureViewError::InvalidArrayLayerCount {
requested: resolved_array_layer_count,
dim: resolved_dimension,
});
}
}
TextureViewDimension::Cube => {
if resolved_array_layer_count != 6 {
return Err(
resource::CreateTextureViewError::InvalidCubemapTextureDepth {
depth: resolved_array_layer_count,
},
);
}
}
TextureViewDimension::CubeArray => {
if resolved_array_layer_count % 6 != 0 {
return Err(
resource::CreateTextureViewError::InvalidCubemapArrayTextureDepth {
depth: resolved_array_layer_count,
},
);
}
}
_ => {}
}
match resolved_dimension {
TextureViewDimension::Cube | TextureViewDimension::CubeArray => {
if texture.desc.size.width != texture.desc.size.height {
return Err(resource::CreateTextureViewError::InvalidCubeTextureViewSize);
}
}
_ => {}
}
if resolved_mip_level_count == 0 {
return Err(resource::CreateTextureViewError::ZeroMipLevelCount);
}
let mip_level_end = desc
.range
.base_mip_level
.saturating_add(resolved_mip_level_count);
let level_end = texture.desc.mip_level_count;
if mip_level_end > level_end {
return Err(resource::CreateTextureViewError::TooManyMipLevels {
requested: mip_level_end,
total: level_end,
});
}
if resolved_array_layer_count == 0 {
return Err(resource::CreateTextureViewError::ZeroArrayLayerCount);
}
let array_layer_end = desc
.range
.base_array_layer
.saturating_add(resolved_array_layer_count);
let layer_end = texture.desc.array_layer_count();
if array_layer_end > layer_end {
return Err(resource::CreateTextureViewError::TooManyArrayLayers {
requested: array_layer_end,
total: layer_end,
});
};
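        // This labeled loop runs at most once; it acts as a block that each
        // renderability check can break out of early with the reason the view
        // cannot be used as a render attachment.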
let render_extent = 'b: loop {
if !texture
.desc
.usage
.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
{
break 'b Err(TextureViewNotRenderableReason::Usage(texture.desc.usage));
}
if resolved_dimension != TextureViewDimension::D2 {
break 'b Err(TextureViewNotRenderableReason::Dimension(
resolved_dimension,
));
}
if resolved_mip_level_count != 1 {
break 'b Err(TextureViewNotRenderableReason::MipLevelCount(
resolved_mip_level_count,
));
}
if resolved_array_layer_count != 1 {
break 'b Err(TextureViewNotRenderableReason::ArrayLayerCount(
resolved_array_layer_count,
));
}
if aspects != hal::FormatAspects::from(texture.desc.format) {
break 'b Err(TextureViewNotRenderableReason::Aspects(aspects));
}
break 'b Ok(texture
.desc
.compute_render_extent(desc.range.base_mip_level));
};
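        // Filter the parent texture's usages down to the ones this view can
        // legally serve, based on its dimension and mip level count.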
let usage = {
let mask_copy = !(hal::TextureUses::COPY_SRC | hal::TextureUses::COPY_DST);
let mask_dimension = match resolved_dimension {
wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => {
hal::TextureUses::RESOURCE
}
wgt::TextureViewDimension::D3 => {
hal::TextureUses::RESOURCE
| hal::TextureUses::STORAGE_READ
| hal::TextureUses::STORAGE_READ_WRITE
}
_ => hal::TextureUses::all(),
};
let mask_mip_level = if resolved_mip_level_count == 1 {
hal::TextureUses::all()
} else {
hal::TextureUses::RESOURCE
};
texture.hal_usage & mask_copy & mask_dimension & mask_mip_level
};
log::debug!(
"Create view for texture {:?} filters usages to {:?}",
texture_id,
usage
);
let format = if resolved_format.is_depth_stencil_component(texture.desc.format) {
texture.desc.format
} else {
resolved_format
};
let resolved_range = wgt::ImageSubresourceRange {
aspect: desc.range.aspect,
base_mip_level: desc.range.base_mip_level,
mip_level_count: Some(resolved_mip_level_count),
base_array_layer: desc.range.base_array_layer,
array_layer_count: Some(resolved_array_layer_count),
};
let hal_desc = hal::TextureViewDescriptor {
label: desc.label.borrow_option(),
format,
dimension: resolved_dimension,
usage,
range: resolved_range,
};
let raw = unsafe {
self.raw
.create_texture_view(texture_raw, &hal_desc)
.map_err(|_| resource::CreateTextureViewError::OutOfMemory)?
};
let selector = TextureSelector {
mips: desc.range.base_mip_level..mip_level_end,
layers: desc.range.base_array_layer..array_layer_end,
};
Ok(resource::TextureView {
raw,
parent_id: Stored {
value: id::Valid(texture_id),
ref_count: texture.life_guard.add_ref(),
},
device_id: texture.device_id.clone(),
desc: resource::HalTextureViewDescriptor {
format: resolved_format,
dimension: resolved_dimension,
range: resolved_range,
},
format_features: texture.format_features,
render_extent,
samples: texture.desc.sample_count,
selector,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
})
}
fn create_sampler(
&self,
self_id: id::DeviceId,
desc: &resource::SamplerDescriptor,
) -> Result<resource::Sampler<A>, resource::CreateSamplerError> {
if desc
.address_modes
.iter()
.any(|am| am == &wgt::AddressMode::ClampToBorder)
{
self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER)?;
}
if desc.border_color == Some(wgt::SamplerBorderColor::Zero) {
self.require_features(wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO)?;
}
if desc.lod_min_clamp < 0.0 {
return Err(resource::CreateSamplerError::InvalidLodMinClamp(
desc.lod_min_clamp,
));
}
if desc.lod_max_clamp < desc.lod_min_clamp {
return Err(resource::CreateSamplerError::InvalidLodMaxClamp {
lod_min_clamp: desc.lod_min_clamp,
lod_max_clamp: desc.lod_max_clamp,
});
}
if desc.anisotropy_clamp < 1 {
return Err(resource::CreateSamplerError::InvalidAnisotropy(
desc.anisotropy_clamp,
));
}
if desc.anisotropy_clamp != 1 {
if !matches!(desc.min_filter, wgt::FilterMode::Linear) {
return Err(
resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
filter_type: resource::SamplerFilterErrorType::MinFilter,
filter_mode: desc.min_filter,
anisotropic_clamp: desc.anisotropy_clamp,
},
);
}
if !matches!(desc.mag_filter, wgt::FilterMode::Linear) {
return Err(
resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
filter_type: resource::SamplerFilterErrorType::MagFilter,
filter_mode: desc.mag_filter,
anisotropic_clamp: desc.anisotropy_clamp,
},
);
}
if !matches!(desc.mipmap_filter, wgt::FilterMode::Linear) {
return Err(
resource::CreateSamplerError::InvalidFilterModeWithAnisotropy {
filter_type: resource::SamplerFilterErrorType::MipmapFilter,
filter_mode: desc.mipmap_filter,
anisotropic_clamp: desc.anisotropy_clamp,
},
);
}
}
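        // Clamp anisotropy to the supported maximum of 16, or disable it
        // entirely when the downlevel flag is missing.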
let anisotropy_clamp = if self
.downlevel
.flags
.contains(wgt::DownlevelFlags::ANISOTROPIC_FILTERING)
{
desc.anisotropy_clamp.min(16)
} else {
1
};
let hal_desc = hal::SamplerDescriptor {
label: desc.label.borrow_option(),
address_modes: desc.address_modes,
mag_filter: desc.mag_filter,
min_filter: desc.min_filter,
mipmap_filter: desc.mipmap_filter,
lod_clamp: desc.lod_min_clamp..desc.lod_max_clamp,
compare: desc.compare,
anisotropy_clamp,
border_color: desc.border_color,
};
let raw = unsafe {
self.raw
.create_sampler(&hal_desc)
.map_err(DeviceError::from)?
};
Ok(resource::Sampler {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear,
})
}
fn create_shader_module<'a>(
&self,
self_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor<'a>,
source: pipeline::ShaderModuleSource<'a>,
) -> Result<pipeline::ShaderModule<A>, pipeline::CreateShaderModuleError> {
let (module, source) = match source {
#[cfg(feature = "wgsl")]
pipeline::ShaderModuleSource::Wgsl(code) => {
profiling::scope!("naga::wgsl::parse_str");
let module = naga::front::wgsl::parse_str(&code).map_err(|inner| {
pipeline::CreateShaderModuleError::Parsing(pipeline::ShaderError {
source: code.to_string(),
label: desc.label.as_ref().map(|l| l.to_string()),
inner: Box::new(inner),
})
})?;
(Cow::Owned(module), code.into_owned())
}
pipeline::ShaderModuleSource::Naga(module) => (module, String::new()),
pipeline::ShaderModuleSource::Dummy(_) => panic!("found `ShaderModuleSource::Dummy`"),
};
for (_, var) in module.global_variables.iter() {
match var.binding {
Some(ref br) if br.group >= self.limits.max_bind_groups => {
return Err(pipeline::CreateShaderModuleError::InvalidGroupIndex {
bind: br.clone(),
group: br.group,
limit: self.limits.max_bind_groups,
});
}
_ => continue,
};
}
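        // Map the device's enabled features onto naga validation capabilities,
        // so shaders that use unsupported capabilities fail validation here
        // rather than in the backend.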
use naga::valid::Capabilities as Caps;
profiling::scope!("naga::validate");
let mut caps = Caps::empty();
caps.set(
Caps::PUSH_CONSTANT,
self.features.contains(wgt::Features::PUSH_CONSTANTS),
);
caps.set(
Caps::FLOAT64,
self.features.contains(wgt::Features::SHADER_F64),
);
caps.set(
Caps::PRIMITIVE_INDEX,
self.features
.contains(wgt::Features::SHADER_PRIMITIVE_INDEX),
);
caps.set(
Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
),
);
caps.set(
Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
),
);
caps.set(
Caps::SAMPLER_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
),
);
caps.set(
Caps::STORAGE_TEXTURE_16BIT_NORM_FORMATS,
self.features
.contains(wgt::Features::TEXTURE_FORMAT_16BIT_NORM),
);
caps.set(
Caps::MULTIVIEW,
self.features.contains(wgt::Features::MULTIVIEW),
);
caps.set(
Caps::EARLY_DEPTH_TEST,
self.features
.contains(wgt::Features::SHADER_EARLY_DEPTH_TEST),
);
caps.set(
Caps::MULTISAMPLED_SHADING,
self.downlevel
.flags
.contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING),
);
let info = naga::valid::Validator::new(naga::valid::ValidationFlags::all(), caps)
.validate(&module)
.map_err(|inner| {
pipeline::CreateShaderModuleError::Validation(pipeline::ShaderError {
source,
label: desc.label.as_ref().map(|l| l.to_string()),
inner: Box::new(inner),
})
})?;
let interface =
validation::Interface::new(&module, &info, self.features, self.limits.clone());
let hal_shader = hal::ShaderInput::Naga(hal::NagaShader { module, info });
let hal_desc = hal::ShaderModuleDescriptor {
label: desc.label.borrow_option(),
runtime_checks: desc.shader_bound_checks.runtime_checks(),
};
let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } {
Ok(raw) => raw,
Err(error) => {
return Err(match error {
hal::ShaderError::Device(error) => {
pipeline::CreateShaderModuleError::Device(error.into())
}
hal::ShaderError::Compilation(ref msg) => {
log::error!("Shader error: {}", msg);
pipeline::CreateShaderModuleError::Generation
}
})
}
};
Ok(pipeline::ShaderModule {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
interface: Some(interface),
#[cfg(debug_assertions)]
label: desc.label.borrow_or_default().to_string(),
})
}
#[allow(unused_unsafe)]
unsafe fn create_shader_module_spirv<'a>(
&self,
self_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor<'a>,
source: &'a [u32],
) -> Result<pipeline::ShaderModule<A>, pipeline::CreateShaderModuleError> {
self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?;
let hal_desc = hal::ShaderModuleDescriptor {
label: desc.label.borrow_option(),
runtime_checks: desc.shader_bound_checks.runtime_checks(),
};
let hal_shader = hal::ShaderInput::SpirV(source);
let raw = match unsafe { self.raw.create_shader_module(&hal_desc, hal_shader) } {
Ok(raw) => raw,
Err(error) => {
return Err(match error {
hal::ShaderError::Device(error) => {
pipeline::CreateShaderModuleError::Device(error.into())
}
hal::ShaderError::Compilation(ref msg) => {
log::error!("Shader error: {}", msg);
pipeline::CreateShaderModuleError::Generation
}
})
}
};
Ok(pipeline::ShaderModule {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
interface: None,
#[cfg(debug_assertions)]
label: desc.label.borrow_or_default().to_string(),
})
}
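    /// Search `guard` for an existing bind group layout on this device with an
    /// identical entry map; if found, bump its multi-ref count and reuse it.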
fn deduplicate_bind_group_layout(
self_id: id::DeviceId,
entry_map: &binding_model::BindEntryMap,
guard: &Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> Option<id::BindGroupLayoutId> {
guard
.iter(self_id.backend())
.find(|&(_, bgl)| bgl.device_id.value.0 == self_id && bgl.entries == *entry_map)
.map(|(id, value)| {
value.multi_ref_count.inc();
id
})
}
fn get_introspection_bind_group_layouts<'a>(
pipeline_layout: &binding_model::PipelineLayout<A>,
bgl_guard: &'a Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> ArrayVec<&'a binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }> {
pipeline_layout
.bind_group_layout_ids
.iter()
.map(|&id| &bgl_guard[id].entries)
.collect()
}
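    /// For buffer bindings declared without `min_binding_size`, collect the
    /// sizes the shaders actually use so they can be validated late, at
    /// draw/dispatch time.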
fn make_late_sized_buffer_groups<'a>(
shader_binding_sizes: &FastHashMap<naga::ResourceBinding, wgt::BufferSize>,
layout: &binding_model::PipelineLayout<A>,
bgl_guard: &'a Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> ArrayVec<pipeline::LateSizedBufferGroup, { hal::MAX_BIND_GROUPS }> {
layout
.bind_group_layout_ids
.iter()
.enumerate()
.map(|(group_index, &bgl_id)| pipeline::LateSizedBufferGroup {
shader_sizes: bgl_guard[bgl_id]
.entries
.values()
.filter_map(|entry| match entry.ty {
wgt::BindingType::Buffer {
min_binding_size: None,
..
} => {
let rb = naga::ResourceBinding {
group: group_index as u32,
binding: entry.binding,
};
let shader_size =
shader_binding_sizes.get(&rb).map_or(0, |nz| nz.get());
Some(shader_size)
}
_ => None,
})
.collect(),
})
.collect()
}
fn create_bind_group_layout(
&self,
self_id: id::DeviceId,
label: Option<&str>,
entry_map: binding_model::BindEntryMap,
) -> Result<binding_model::BindGroupLayout<A>, binding_model::CreateBindGroupLayoutError> {
#[derive(PartialEq)]
enum WritableStorage {
Yes,
No,
}
for entry in entry_map.values() {
use wgt::BindingType as Bt;
let mut required_features = wgt::Features::empty();
let mut required_downlevel_flags = wgt::DownlevelFlags::empty();
let (array_feature, writable_storage) = match entry.ty {
Bt::Buffer {
ty: wgt::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: _,
} => (
Some(wgt::Features::BUFFER_BINDING_ARRAY),
WritableStorage::No,
),
Bt::Buffer {
ty: wgt::BufferBindingType::Uniform,
has_dynamic_offset: true,
min_binding_size: _,
} => (
Some(wgt::Features::BUFFER_BINDING_ARRAY),
WritableStorage::No,
),
Bt::Buffer {
ty: wgt::BufferBindingType::Storage { read_only },
..
} => (
Some(
wgt::Features::BUFFER_BINDING_ARRAY
| wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
),
match read_only {
true => WritableStorage::No,
false => WritableStorage::Yes,
},
),
Bt::Sampler { .. } => (
Some(wgt::Features::TEXTURE_BINDING_ARRAY),
WritableStorage::No,
),
Bt::Texture {
multisampled: true,
sample_type: TextureSampleType::Float { filterable: true },
..
} => {
return Err(binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error: binding_model::BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled,
});
}
Bt::Texture { .. } => (
Some(wgt::Features::TEXTURE_BINDING_ARRAY),
WritableStorage::No,
),
Bt::StorageTexture {
access,
view_dimension,
format: _,
} => {
match view_dimension {
wgt::TextureViewDimension::Cube | wgt::TextureViewDimension::CubeArray => {
return Err(binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error: binding_model::BindGroupLayoutEntryError::StorageTextureCube,
})
}
_ => (),
}
match access {
wgt::StorageTextureAccess::ReadOnly
| wgt::StorageTextureAccess::ReadWrite
if !self.features.contains(
wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
) =>
{
return Err(binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error: binding_model::BindGroupLayoutEntryError::StorageTextureReadWrite,
});
}
_ => (),
}
(
Some(
wgt::Features::TEXTURE_BINDING_ARRAY
| wgt::Features::STORAGE_RESOURCE_BINDING_ARRAY,
),
match access {
wgt::StorageTextureAccess::WriteOnly => WritableStorage::Yes,
wgt::StorageTextureAccess::ReadOnly => {
required_features |=
wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
WritableStorage::No
}
wgt::StorageTextureAccess::ReadWrite => {
required_features |=
wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
WritableStorage::Yes
}
},
)
}
};
if entry.count.is_some() {
required_features |= array_feature
.ok_or(binding_model::BindGroupLayoutEntryError::ArrayUnsupported)
.map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error,
})?;
}
if entry.visibility.contains_invalid_bits() {
return Err(
binding_model::CreateBindGroupLayoutError::InvalidVisibility(entry.visibility),
);
}
if entry.visibility.contains(wgt::ShaderStages::VERTEX) {
if writable_storage == WritableStorage::Yes {
required_features |= wgt::Features::VERTEX_WRITABLE_STORAGE;
}
if let Bt::Buffer {
ty: wgt::BufferBindingType::Storage { .. },
..
} = entry.ty
{
required_downlevel_flags |= wgt::DownlevelFlags::VERTEX_STORAGE;
}
}
if writable_storage == WritableStorage::Yes
&& entry.visibility.contains(wgt::ShaderStages::FRAGMENT)
{
required_downlevel_flags |= wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE;
}
self.require_features(required_features)
.map_err(binding_model::BindGroupLayoutEntryError::MissingFeatures)
.map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error,
})?;
self.require_downlevel_flags(required_downlevel_flags)
.map_err(binding_model::BindGroupLayoutEntryError::MissingDownlevelFlags)
.map_err(|error| binding_model::CreateBindGroupLayoutError::Entry {
binding: entry.binding,
error,
})?;
}
let bgl_flags = conv::bind_group_layout_flags(self.features);
let mut hal_bindings = entry_map.values().cloned().collect::<Vec<_>>();
hal_bindings.sort_by_key(|b| b.binding);
let hal_desc = hal::BindGroupLayoutDescriptor {
label,
flags: bgl_flags,
entries: &hal_bindings,
};
let raw = unsafe {
self.raw
.create_bind_group_layout(&hal_desc)
.map_err(DeviceError::from)?
};
let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
for entry in entry_map.values() {
count_validator.add_binding(entry);
}
count_validator
.validate(&self.limits)
.map_err(binding_model::CreateBindGroupLayoutError::TooManyBindings)?;
Ok(binding_model::BindGroupLayout {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
multi_ref_count: MultiRefCount::new(),
dynamic_count: entry_map
.values()
.filter(|b| b.ty.has_dynamic_offset())
.count(),
count_validator,
entries: entry_map,
#[cfg(debug_assertions)]
label: label.unwrap_or("").to_string(),
})
}
fn create_buffer_binding<'a>(
bb: &binding_model::BufferBinding,
binding: u32,
decl: &wgt::BindGroupLayoutEntry,
used_buffer_ranges: &mut Vec<BufferInitTrackerAction>,
dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>,
late_buffer_binding_sizes: &mut FastHashMap<u32, wgt::BufferSize>,
used: &mut BindGroupStates<A>,
storage: &'a Storage<resource::Buffer<A>, id::BufferId>,
limits: &wgt::Limits,
) -> Result<hal::BufferBinding<'a, A>, binding_model::CreateBindGroupError> {
use crate::binding_model::CreateBindGroupError as Error;
let (binding_ty, dynamic, min_size) = match decl.ty {
wgt::BindingType::Buffer {
ty,
has_dynamic_offset,
min_binding_size,
} => (ty, has_dynamic_offset, min_binding_size),
_ => {
return Err(Error::WrongBindingType {
binding,
actual: decl.ty,
expected: "UniformBuffer, StorageBuffer or ReadonlyStorageBuffer",
})
}
};
let (pub_usage, internal_use, range_limit) = match binding_ty {
wgt::BufferBindingType::Uniform => (
wgt::BufferUsages::UNIFORM,
hal::BufferUses::UNIFORM,
limits.max_uniform_buffer_binding_size,
),
wgt::BufferBindingType::Storage { read_only } => (
wgt::BufferUsages::STORAGE,
if read_only {
hal::BufferUses::STORAGE_READ
} else {
hal::BufferUses::STORAGE_READ_WRITE
},
limits.max_storage_buffer_binding_size,
),
};
let (align, align_limit_name) =
binding_model::buffer_binding_type_alignment(limits, binding_ty);
if bb.offset % align as u64 != 0 {
return Err(Error::UnalignedBufferOffset(
bb.offset,
align_limit_name,
align,
));
}
let buffer = used
.buffers
.add_single(storage, bb.buffer_id, internal_use)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
check_buffer_usage(buffer.usage, pub_usage)?;
let raw_buffer = buffer
.raw
.as_ref()
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
let (bind_size, bind_end) = match bb.size {
Some(size) => {
let end = bb.offset + size.get();
if end > buffer.size {
return Err(Error::BindingRangeTooLarge {
buffer: bb.buffer_id,
range: bb.offset..end,
size: buffer.size,
});
}
(size.get(), end)
}
None => (buffer.size - bb.offset, buffer.size),
};
if bind_size > range_limit as u64 {
return Err(Error::BufferRangeTooLarge {
binding,
given: bind_size as u32,
limit: range_limit,
});
}
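        // Record what is needed to validate dynamic offsets when the bind
        // group is used.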
if dynamic {
dynamic_binding_info.push(binding_model::BindGroupDynamicBindingData {
binding_idx: binding,
buffer_size: buffer.size,
binding_range: bb.offset..bind_end,
maximum_dynamic_offset: buffer.size - bind_end,
binding_type: binding_ty,
});
}
if let Some(non_zero) = min_size {
let min_size = non_zero.get();
if min_size > bind_size {
return Err(Error::BindingSizeTooSmall {
buffer: bb.buffer_id,
actual: bind_size,
min: min_size,
});
}
} else {
let late_size =
wgt::BufferSize::new(bind_size).ok_or(Error::BindingZeroSize(bb.buffer_id))?;
late_buffer_binding_sizes.insert(binding, late_size);
}
assert_eq!(bb.offset % wgt::COPY_BUFFER_ALIGNMENT, 0);
used_buffer_ranges.extend(buffer.initialization_status.create_action(
bb.buffer_id,
bb.offset..bb.offset + bind_size,
MemoryInitKind::NeedsInitializedMemory,
));
Ok(hal::BufferBinding {
buffer: raw_buffer,
offset: bb.offset,
size: bb.size,
})
}
fn create_texture_binding(
view: &resource::TextureView<A>,
texture_guard: &Storage<resource::Texture<A>, id::TextureId>,
internal_use: hal::TextureUses,
pub_usage: wgt::TextureUsages,
used: &mut BindGroupStates<A>,
used_texture_ranges: &mut Vec<TextureInitTrackerAction>,
) -> Result<(), binding_model::CreateBindGroupError> {
let texture = used
.textures
.add_single(
texture_guard,
view.parent_id.value.0,
view.parent_id.ref_count.clone(),
Some(view.selector.clone()),
internal_use,
)
.ok_or(binding_model::CreateBindGroupError::InvalidTexture(
view.parent_id.value.0,
))?;
check_texture_usage(texture.desc.usage, pub_usage)?;
used_texture_ranges.push(TextureInitTrackerAction {
id: view.parent_id.value.0,
range: TextureInitRange {
mip_range: view.desc.range.mip_range(texture.desc.mip_level_count),
layer_range: view
.desc
.range
.layer_range(texture.desc.array_layer_count()),
},
kind: MemoryInitKind::NeedsInitializedMemory,
});
Ok(())
}
fn create_bind_group<G: GlobalIdentityHandlerFactory>(
&self,
self_id: id::DeviceId,
layout: &binding_model::BindGroupLayout<A>,
desc: &binding_model::BindGroupDescriptor,
hub: &Hub<A, G>,
token: &mut Token<binding_model::BindGroupLayout<A>>,
) -> Result<binding_model::BindGroup<A>, binding_model::CreateBindGroupError> {
use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error};
{
let actual = desc.entries.len();
let expected = layout.entries.len();
if actual != expected {
return Err(Error::BindingsNumMismatch { expected, actual });
}
}
let mut dynamic_binding_info = Vec::new();
let mut late_buffer_binding_sizes = FastHashMap::default();
let mut used = BindGroupStates::new();
let (buffer_guard, mut token) = hub.buffers.read(token);
let (texture_guard, mut token) = hub.textures.read(&mut token); let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token);
let mut used_buffer_ranges = Vec::new();
let mut used_texture_ranges = Vec::new();
let mut hal_entries = Vec::with_capacity(desc.entries.len());
let mut hal_buffers = Vec::new();
let mut hal_samplers = Vec::new();
let mut hal_textures = Vec::new();
for entry in desc.entries.iter() {
let binding = entry.binding;
let decl = layout
.entries
.get(&binding)
.ok_or(Error::MissingBindingDeclaration(binding))?;
let (res_index, count) = match entry.resource {
Br::Buffer(ref bb) => {
let bb = Self::create_buffer_binding(
bb,
binding,
decl,
&mut used_buffer_ranges,
&mut dynamic_binding_info,
&mut late_buffer_binding_sizes,
&mut used,
&*buffer_guard,
&self.limits,
)?;
let res_index = hal_buffers.len();
hal_buffers.push(bb);
(res_index, 1)
}
Br::BufferArray(ref bindings_array) => {
let num_bindings = bindings_array.len();
Self::check_array_binding(self.features, decl.count, num_bindings)?;
let res_index = hal_buffers.len();
for bb in bindings_array.iter() {
let bb = Self::create_buffer_binding(
bb,
binding,
decl,
&mut used_buffer_ranges,
&mut dynamic_binding_info,
&mut late_buffer_binding_sizes,
&mut used,
&*buffer_guard,
&self.limits,
)?;
hal_buffers.push(bb);
}
(res_index, num_bindings)
}
Br::Sampler(id) => {
match decl.ty {
wgt::BindingType::Sampler(ty) => {
let sampler = used
.samplers
.add_single(&*sampler_guard, id)
.ok_or(Error::InvalidSampler(id))?;
let (allowed_filtering, allowed_comparison) = match ty {
wgt::SamplerBindingType::Filtering => (None, false),
wgt::SamplerBindingType::NonFiltering => (Some(false), false),
wgt::SamplerBindingType::Comparison => (None, true),
};
if let Some(allowed_filtering) = allowed_filtering {
if allowed_filtering != sampler.filtering {
return Err(Error::WrongSamplerFiltering {
binding,
layout_flt: allowed_filtering,
sampler_flt: sampler.filtering,
});
}
}
if allowed_comparison != sampler.comparison {
return Err(Error::WrongSamplerComparison {
binding,
layout_cmp: allowed_comparison,
sampler_cmp: sampler.comparison,
});
}
let res_index = hal_samplers.len();
hal_samplers.push(&sampler.raw);
(res_index, 1)
}
_ => {
return Err(Error::WrongBindingType {
binding,
actual: decl.ty,
expected: "Sampler",
})
}
}
}
Br::SamplerArray(ref bindings_array) => {
let num_bindings = bindings_array.len();
Self::check_array_binding(self.features, decl.count, num_bindings)?;
let res_index = hal_samplers.len();
for &id in bindings_array.iter() {
let sampler = used
.samplers
.add_single(&*sampler_guard, id)
.ok_or(Error::InvalidSampler(id))?;
hal_samplers.push(&sampler.raw);
}
(res_index, num_bindings)
}
Br::TextureView(id) => {
let view = used
.views
.add_single(&*texture_view_guard, id)
.ok_or(Error::InvalidTextureView(id))?;
let (pub_usage, internal_use) = Self::texture_use_parameters(
binding,
decl,
view,
"SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
)?;
Self::create_texture_binding(
view,
&texture_guard,
internal_use,
pub_usage,
&mut used,
&mut used_texture_ranges,
)?;
let res_index = hal_textures.len();
hal_textures.push(hal::TextureBinding {
view: &view.raw,
usage: internal_use,
});
(res_index, 1)
}
Br::TextureViewArray(ref bindings_array) => {
let num_bindings = bindings_array.len();
Self::check_array_binding(self.features, decl.count, num_bindings)?;
let res_index = hal_textures.len();
for &id in bindings_array.iter() {
let view = used
.views
.add_single(&*texture_view_guard, id)
.ok_or(Error::InvalidTextureView(id))?;
let (pub_usage, internal_use) =
Self::texture_use_parameters(binding, decl, view,
"SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?;
Self::create_texture_binding(
view,
&texture_guard,
internal_use,
pub_usage,
&mut used,
&mut used_texture_ranges,
)?;
hal_textures.push(hal::TextureBinding {
view: &view.raw,
usage: internal_use,
});
}
(res_index, num_bindings)
}
};
hal_entries.push(hal::BindGroupEntry {
binding,
resource_index: res_index as u32,
count: count as u32,
});
}
used.optimize();
hal_entries.sort_by_key(|entry| entry.binding);
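        // After sorting, any duplicate bindings are adjacent.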
for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) {
if a.binding == b.binding {
return Err(Error::DuplicateBinding(a.binding));
}
}
let hal_desc = hal::BindGroupDescriptor {
label: desc.label.borrow_option(),
layout: &layout.raw,
entries: &hal_entries,
buffers: &hal_buffers,
samplers: &hal_samplers,
textures: &hal_textures,
};
let raw = unsafe {
self.raw
.create_bind_group(&hal_desc)
.map_err(DeviceError::from)?
};
layout.multi_ref_count.inc();
Ok(binding_model::BindGroup {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
layout_id: id::Valid(desc.layout),
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
used,
used_buffer_ranges,
used_texture_ranges,
dynamic_binding_info,
late_buffer_binding_sizes: layout
.entries
.keys()
.flat_map(|binding| late_buffer_binding_sizes.get(binding).cloned())
.collect(),
})
}
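    /// Check that the number of resources provided for a binding array matches
    /// the count declared in the layout: exactly, or fewer when
    /// PARTIALLY_BOUND_BINDING_ARRAY is enabled.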
fn check_array_binding(
features: wgt::Features,
count: Option<NonZeroU32>,
num_bindings: usize,
) -> Result<(), super::binding_model::CreateBindGroupError> {
use super::binding_model::CreateBindGroupError as Error;
if let Some(count) = count {
let count = count.get() as usize;
if count < num_bindings {
return Err(Error::BindingArrayPartialLengthMismatch {
actual: num_bindings,
expected: count,
});
}
if count != num_bindings
&& !features.contains(wgt::Features::PARTIALLY_BOUND_BINDING_ARRAY)
{
return Err(Error::BindingArrayLengthMismatch {
actual: num_bindings,
expected: count,
});
}
if num_bindings == 0 {
return Err(Error::BindingArrayZeroLength);
}
} else {
return Err(Error::SingleBindingExpected);
};
Ok(())
}
fn texture_use_parameters(
binding: u32,
decl: &wgt::BindGroupLayoutEntry,
view: &crate::resource::TextureView<A>,
expected: &'static str,
) -> Result<(wgt::TextureUsages, hal::TextureUses), binding_model::CreateBindGroupError> {
use crate::binding_model::CreateBindGroupError as Error;
if view
.desc
.aspects()
.contains(hal::FormatAspects::DEPTH | hal::FormatAspects::STENCIL)
{
return Err(Error::DepthStencilAspect);
}
match decl.ty {
wgt::BindingType::Texture {
sample_type,
view_dimension,
multisampled,
} => {
use wgt::TextureSampleType as Tst;
if multisampled != (view.samples != 1) {
return Err(Error::InvalidTextureMultisample {
binding,
layout_multisampled: multisampled,
view_samples: view.samples,
});
}
let compat_sample_type = view
.desc
.format
.sample_type(Some(view.desc.range.aspect))
.unwrap();
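                // A non-filterable float layout binding accepts any float (or
                // depth) view format; a filterable binding requires the view
                // format to actually be filterable, either inherently or via
                // the adapter-specific FILTERABLE flag (the guarded arm below).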
match (sample_type, compat_sample_type) {
                    (Tst::Uint, Tst::Uint)
                    | (Tst::Sint, Tst::Sint)
                    | (Tst::Depth, Tst::Depth)
                    | (Tst::Float { filterable: false }, Tst::Float { .. })
                    | (Tst::Float { filterable: true }, Tst::Float { filterable: true })
                    | (Tst::Float { filterable: false }, Tst::Depth) => {}
                    (Tst::Float { filterable: true }, Tst::Float { .. })
                        if view
                            .format_features
                            .flags
                            .contains(wgt::TextureFormatFeatureFlags::FILTERABLE) => {}
_ => {
return Err(Error::InvalidTextureSampleType {
binding,
layout_sample_type: sample_type,
view_format: view.desc.format,
})
}
}
if view_dimension != view.desc.dimension {
return Err(Error::InvalidTextureDimension {
binding,
layout_dimension: view_dimension,
view_dimension: view.desc.dimension,
});
}
Ok((
wgt::TextureUsages::TEXTURE_BINDING,
hal::TextureUses::RESOURCE,
))
}
wgt::BindingType::StorageTexture {
access,
format,
view_dimension,
} => {
if format != view.desc.format {
return Err(Error::InvalidStorageTextureFormat {
binding,
layout_format: format,
view_format: view.desc.format,
});
}
if view_dimension != view.desc.dimension {
return Err(Error::InvalidTextureDimension {
binding,
layout_dimension: view_dimension,
view_dimension: view.desc.dimension,
});
}
let mip_level_count = view.selector.mips.end - view.selector.mips.start;
if mip_level_count != 1 {
return Err(Error::InvalidStorageTextureMipLevelCount {
binding,
mip_level_count,
});
}
let internal_use = match access {
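// Note: `hal` has no write-only texture use, so write-only storage
// bindings map onto the read-write use.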
wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_READ_WRITE,
wgt::StorageTextureAccess::ReadOnly => {
if !view
.format_features
.flags
.contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
{
return Err(Error::StorageReadNotSupported(view.desc.format));
}
hal::TextureUses::STORAGE_READ
}
wgt::StorageTextureAccess::ReadWrite => {
if !view
.format_features
.flags
.contains(wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE)
{
return Err(Error::StorageReadNotSupported(view.desc.format));
}
hal::TextureUses::STORAGE_READ_WRITE
}
};
Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use))
}
_ => Err(Error::WrongBindingType {
binding,
actual: decl.ty,
expected,
}),
}
}
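// Validates the bind group count, the push constant ranges (feature gate,
// one range per stage, size limit, alignment) and the aggregate binding
// counts before creating the raw pipeline layout.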
fn create_pipeline_layout(
&self,
self_id: id::DeviceId,
desc: &binding_model::PipelineLayoutDescriptor,
bgl_guard: &Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
) -> Result<binding_model::PipelineLayout<A>, binding_model::CreatePipelineLayoutError> {
use crate::binding_model::CreatePipelineLayoutError as Error;
let bind_group_layouts_count = desc.bind_group_layouts.len();
let device_max_bind_groups = self.limits.max_bind_groups as usize;
if bind_group_layouts_count > device_max_bind_groups {
return Err(Error::TooManyGroups {
actual: bind_group_layouts_count,
max: device_max_bind_groups,
});
}
if !desc.push_constant_ranges.is_empty() {
self.require_features(wgt::Features::PUSH_CONSTANTS)?;
}
let mut used_stages = wgt::ShaderStages::empty();
for (index, pc) in desc.push_constant_ranges.iter().enumerate() {
if pc.stages.intersects(used_stages) {
return Err(Error::MoreThanOnePushConstantRangePerStage {
index,
provided: pc.stages,
intersected: pc.stages & used_stages,
});
}
used_stages |= pc.stages;
let device_max_pc_size = self.limits.max_push_constant_size;
if device_max_pc_size < pc.range.end {
return Err(Error::PushConstantRangeTooLarge {
index,
range: pc.range.clone(),
max: device_max_pc_size,
});
}
if pc.range.start % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
return Err(Error::MisalignedPushConstantRange {
index,
bound: pc.range.start,
});
}
if pc.range.end % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
return Err(Error::MisalignedPushConstantRange {
index,
bound: pc.range.end,
});
}
}
let mut count_validator = binding_model::BindingTypeMaxCountValidator::default();
for &id in desc.bind_group_layouts.iter() {
let bind_group_layout = bgl_guard
.get(id)
.map_err(|_| Error::InvalidBindGroupLayout(id))?;
count_validator.merge(&bind_group_layout.count_validator);
}
count_validator
.validate(&self.limits)
.map_err(Error::TooManyBindings)?;
let bgl_vec = desc
.bind_group_layouts
.iter()
.map(|&id| &bgl_guard.get(id).unwrap().raw)
.collect::<Vec<_>>();
let hal_desc = hal::PipelineLayoutDescriptor {
label: desc.label.borrow_option(),
flags: hal::PipelineLayoutFlags::BASE_VERTEX_INSTANCE,
bind_group_layouts: &bgl_vec,
push_constant_ranges: desc.push_constant_ranges.as_ref(),
};
let raw = unsafe {
self.raw
.create_pipeline_layout(&hal_desc)
.map_err(DeviceError::from)?
};
Ok(binding_model::PipelineLayout {
raw,
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
bind_group_layout_ids: desc
.bind_group_layouts
.iter()
.map(|&id| {
bgl_guard.get(id).unwrap().multi_ref_count.inc();
id::Valid(id)
})
.collect(),
push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
})
}
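// Builds an implicit pipeline layout from the per-group binding maps
// reflected out of the shader stages, reusing deduplicated bind group
// layouts where possible.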
fn derive_pipeline_layout(
&self,
self_id: id::DeviceId,
implicit_context: Option<ImplicitPipelineContext>,
mut derived_group_layouts: ArrayVec<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>,
bgl_guard: &mut Storage<binding_model::BindGroupLayout<A>, id::BindGroupLayoutId>,
pipeline_layout_guard: &mut Storage<binding_model::PipelineLayout<A>, id::PipelineLayoutId>,
) -> Result<id::PipelineLayoutId, pipeline::ImplicitLayoutError> {
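// Drop trailing empty binding maps so the implicit layout only spans the
// bind groups the shaders actually use.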
while derived_group_layouts
.last()
.map_or(false, |map| map.is_empty())
{
derived_group_layouts.pop();
}
let mut ids = implicit_context.ok_or(pipeline::ImplicitLayoutError::MissingIds(0))?;
let group_count = derived_group_layouts.len();
if ids.group_ids.len() < group_count {
log::error!(
"Not enough bind group IDs ({}) specified for the implicit layout ({})",
ids.group_ids.len(),
derived_group_layouts.len()
);
return Err(pipeline::ImplicitLayoutError::MissingIds(group_count as _));
}
for (bgl_id, map) in ids.group_ids.iter_mut().zip(derived_group_layouts) {
match Device::deduplicate_bind_group_layout(self_id, &map, bgl_guard) {
Some(dedup_id) => {
*bgl_id = dedup_id;
}
None => {
let bgl = self.create_bind_group_layout(self_id, None, map)?;
bgl_guard.force_replace(*bgl_id, bgl);
}
};
}
let layout_desc = binding_model::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: Cow::Borrowed(&ids.group_ids[..group_count]),
push_constant_ranges: Cow::Borrowed(&[]),
};
let layout = self.create_pipeline_layout(self_id, &layout_desc, bgl_guard)?;
pipeline_layout_guard.force_replace(ids.root_id, layout);
Ok(ids.root_id)
}
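// Creates a compute pipeline, deriving an implicit layout from shader
// reflection when `desc.layout` is `None`.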
fn create_compute_pipeline<G: GlobalIdentityHandlerFactory>(
&self,
self_id: id::DeviceId,
desc: &pipeline::ComputePipelineDescriptor,
implicit_context: Option<ImplicitPipelineContext>,
hub: &Hub<A, G>,
token: &mut Token<Self>,
) -> Result<pipeline::ComputePipeline<A>, pipeline::CreateComputePipelineError> {
let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
if let Some(ref ids) = implicit_context {
pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
for &bgl_id in ids.group_ids.iter() {
bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
}
}
self.require_downlevel_flags(wgt::DownlevelFlags::COMPUTE_SHADERS)?;
let mut derived_group_layouts =
ArrayVec::<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>::new();
let mut shader_binding_sizes = FastHashMap::default();
let io = validation::StageIo::default();
let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
let shader_module = shader_module_guard
.get(desc.stage.module)
.map_err(|_| validation::StageError::InvalidModule)?;
{
let flag = wgt::ShaderStages::COMPUTE;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?,
&*bgl_guard,
)),
None => {
for _ in 0..self.limits.max_bind_groups {
derived_group_layouts.push(binding_model::BindEntryMap::default());
}
None
}
};
if let Some(ref interface) = shader_module.interface {
let _ = interface.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&mut shader_binding_sizes,
&desc.stage.entry_point,
flag,
io,
None,
)?;
}
}
let pipeline_layout_id = match desc.layout {
Some(id) => id,
None => self.derive_pipeline_layout(
self_id,
implicit_context,
derived_group_layouts,
&mut *bgl_guard,
&mut *pipeline_layout_guard,
)?,
};
let layout = pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
let late_sized_buffer_groups =
Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
let pipeline_desc = hal::ComputePipelineDescriptor {
label: desc.label.borrow_option(),
layout: &layout.raw,
stage: hal::ProgrammableStage {
entry_point: desc.stage.entry_point.as_ref(),
module: &shader_module.raw,
},
};
let raw =
unsafe { self.raw.create_compute_pipeline(&pipeline_desc) }.map_err(
|err| match err {
hal::PipelineError::Device(error) => {
pipeline::CreateComputePipelineError::Device(error.into())
}
hal::PipelineError::Linkage(_stages, msg) => {
pipeline::CreateComputePipelineError::Internal(msg)
}
hal::PipelineError::EntryPoint(_stage) => {
pipeline::CreateComputePipelineError::Internal(EP_FAILURE.to_string())
}
},
)?;
let pipeline = pipeline::ComputePipeline {
raw,
layout_id: Stored {
value: id::Valid(pipeline_layout_id),
ref_count: layout.life_guard.add_ref(),
},
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
late_sized_buffer_groups,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
};
Ok(pipeline)
}
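// Creates a render pipeline: validates vertex buffers and attributes,
// primitive and target state, then runs stage IO checks across the vertex
// and fragment shaders.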
fn create_render_pipeline<G: GlobalIdentityHandlerFactory>(
&self,
self_id: id::DeviceId,
adapter: &Adapter<A>,
desc: &pipeline::RenderPipelineDescriptor,
implicit_context: Option<ImplicitPipelineContext>,
hub: &Hub<A, G>,
token: &mut Token<Self>,
) -> Result<pipeline::RenderPipeline<A>, pipeline::CreateRenderPipelineError> {
use wgt::TextureFormatFeatureFlags as Tfff;
let (mut pipeline_layout_guard, mut token) = hub.pipeline_layouts.write(token);
let (mut bgl_guard, mut token) = hub.bind_group_layouts.write(&mut token);
if let Some(ref ids) = implicit_context {
pipeline_layout_guard.insert_error(ids.root_id, IMPLICIT_FAILURE);
for &bgl_id in ids.group_ids.iter() {
bgl_guard.insert_error(bgl_id, IMPLICIT_FAILURE);
}
}
let mut derived_group_layouts =
ArrayVec::<binding_model::BindEntryMap, { hal::MAX_BIND_GROUPS }>::new();
let mut shader_binding_sizes = FastHashMap::default();
let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0);
if num_attachments > hal::MAX_COLOR_ATTACHMENTS {
return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
command::ColorAttachmentError::TooMany {
given: num_attachments,
limit: hal::MAX_COLOR_ATTACHMENTS,
},
));
}
let color_targets = desc
.fragment
.as_ref()
.map_or(&[][..], |fragment| &fragment.targets);
let depth_stencil_state = desc.depth_stencil.as_ref();
let cts: ArrayVec<_, { hal::MAX_COLOR_ATTACHMENTS }> =
color_targets.iter().filter_map(|x| x.as_ref()).collect();
if !cts.is_empty() && {
let first = &cts[0];
cts[1..]
.iter()
.any(|ct| ct.write_mask != first.write_mask || ct.blend != first.blend)
} {
log::info!("Color targets: {:?}", color_targets);
self.require_downlevel_flags(wgt::DownlevelFlags::INDEPENDENT_BLEND)?;
}
let mut io = validation::StageIo::default();
let mut validated_stages = wgt::ShaderStages::empty();
let mut vertex_steps = Vec::with_capacity(desc.vertex.buffers.len());
let mut vertex_buffers = Vec::with_capacity(desc.vertex.buffers.len());
let mut total_attributes = 0;
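// Validate each vertex buffer layout (stride limit and alignment) and
// record attribute locations so shader inputs can be checked against them.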
for (i, vb_state) in desc.vertex.buffers.iter().enumerate() {
vertex_steps.push(pipeline::VertexStep {
stride: vb_state.array_stride,
mode: vb_state.step_mode,
});
if vb_state.attributes.is_empty() {
continue;
}
if vb_state.array_stride > self.limits.max_vertex_buffer_array_stride as u64 {
return Err(pipeline::CreateRenderPipelineError::VertexStrideTooLarge {
index: i as u32,
given: vb_state.array_stride as u32,
limit: self.limits.max_vertex_buffer_array_stride,
});
}
if vb_state.array_stride % wgt::VERTEX_STRIDE_ALIGNMENT != 0 {
return Err(pipeline::CreateRenderPipelineError::UnalignedVertexStride {
index: i as u32,
stride: vb_state.array_stride,
});
}
vertex_buffers.push(hal::VertexBufferLayout {
array_stride: vb_state.array_stride,
step_mode: vb_state.step_mode,
attributes: vb_state.attributes.as_ref(),
});
for attribute in vb_state.attributes.iter() {
if attribute.offset >= 0x10000000 {
return Err(
pipeline::CreateRenderPipelineError::InvalidVertexAttributeOffset {
location: attribute.shader_location,
offset: attribute.offset,
},
);
}
if let wgt::VertexFormat::Float64
| wgt::VertexFormat::Float64x2
| wgt::VertexFormat::Float64x3
| wgt::VertexFormat::Float64x4 = attribute.format
{
self.require_features(wgt::Features::VERTEX_ATTRIBUTE_64BIT)?;
}
let previous = io.insert(
attribute.shader_location,
validation::InterfaceVar::vertex_attribute(attribute.format),
);
if previous.is_some() {
return Err(pipeline::CreateRenderPipelineError::ShaderLocationClash(
attribute.shader_location,
));
}
}
total_attributes += vb_state.attributes.len();
}
if vertex_buffers.len() > self.limits.max_vertex_buffers as usize {
return Err(pipeline::CreateRenderPipelineError::TooManyVertexBuffers {
given: vertex_buffers.len() as u32,
limit: self.limits.max_vertex_buffers,
});
}
if total_attributes > self.limits.max_vertex_attributes as usize {
return Err(
pipeline::CreateRenderPipelineError::TooManyVertexAttributes {
given: total_attributes as u32,
limit: self.limits.max_vertex_attributes,
},
);
}
if desc.primitive.strip_index_format.is_some() && !desc.primitive.topology.is_strip() {
return Err(
pipeline::CreateRenderPipelineError::StripIndexFormatForNonStripTopology {
strip_index_format: desc.primitive.strip_index_format,
topology: desc.primitive.topology,
},
);
}
if desc.primitive.unclipped_depth {
self.require_features(wgt::Features::DEPTH_CLIP_CONTROL)?;
}
if desc.primitive.polygon_mode == wgt::PolygonMode::Line {
self.require_features(wgt::Features::POLYGON_MODE_LINE)?;
}
if desc.primitive.polygon_mode == wgt::PolygonMode::Point {
self.require_features(wgt::Features::POLYGON_MODE_POINT)?;
}
if desc.primitive.conservative {
self.require_features(wgt::Features::CONSERVATIVE_RASTERIZATION)?;
}
if desc.primitive.conservative && desc.primitive.polygon_mode != wgt::PolygonMode::Fill {
return Err(
pipeline::CreateRenderPipelineError::ConservativeRasterizationNonFillPolygonMode,
);
}
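// Validate every color target. The `loop` only runs once; it acts as a
// block that validation can `break` out of with the first error found.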
for (i, cs) in color_targets.iter().enumerate() {
if let Some(cs) = cs.as_ref() {
let error = loop {
if cs.write_mask.contains_invalid_bits() {
break Some(pipeline::ColorStateError::InvalidWriteMask(cs.write_mask));
}
let format_features = self.describe_format_features(adapter, cs.format)?;
if !format_features
.allowed_usages
.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
{
break Some(pipeline::ColorStateError::FormatNotRenderable(cs.format));
}
let blendable = format_features.flags.contains(Tfff::BLENDABLE);
let filterable = format_features.flags.contains(Tfff::FILTERABLE);
let adapter_specific = self
.features
.contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
if cs.blend.is_some() && (!blendable || (!filterable && !adapter_specific)) {
break Some(pipeline::ColorStateError::FormatNotBlendable(cs.format));
}
if !hal::FormatAspects::from(cs.format).contains(hal::FormatAspects::COLOR) {
break Some(pipeline::ColorStateError::FormatNotColor(cs.format));
}
if desc.multisample.count > 1
&& !format_features
.flags
.sample_count_supported(desc.multisample.count)
{
break Some(pipeline::ColorStateError::FormatNotMultisampled(cs.format));
}
break None;
};
if let Some(e) = error {
return Err(pipeline::CreateRenderPipelineError::ColorState(i as u8, e));
}
}
}
if let Some(ds) = depth_stencil_state {
let error = loop {
let format_features = self.describe_format_features(adapter, ds.format)?;
if !format_features
.allowed_usages
.contains(wgt::TextureUsages::RENDER_ATTACHMENT)
{
break Some(pipeline::DepthStencilStateError::FormatNotRenderable(
ds.format,
));
}
let aspect = hal::FormatAspects::from(ds.format);
if ds.is_depth_enabled() && !aspect.contains(hal::FormatAspects::DEPTH) {
break Some(pipeline::DepthStencilStateError::FormatNotDepth(ds.format));
}
if ds.stencil.is_enabled() && !aspect.contains(hal::FormatAspects::STENCIL) {
break Some(pipeline::DepthStencilStateError::FormatNotStencil(
ds.format,
));
}
if desc.multisample.count > 1
&& !format_features
.flags
.sample_count_supported(desc.multisample.count)
{
break Some(pipeline::DepthStencilStateError::FormatNotMultisampled(
ds.format,
));
}
break None;
};
if let Some(e) = error {
return Err(pipeline::CreateRenderPipelineError::DepthStencilState(e));
}
if ds.bias.clamp != 0.0 {
self.require_downlevel_flags(wgt::DownlevelFlags::DEPTH_BIAS_CLAMP)?;
}
}
if desc.layout.is_none() {
for _ in 0..self.limits.max_bind_groups {
derived_group_layouts.push(binding_model::BindEntryMap::default());
}
}
let samples = {
let sc = desc.multisample.count;
if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) {
return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
}
sc
};
let (shader_module_guard, _) = hub.shader_modules.read(&mut token);
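// Validate the vertex stage against the provided or derived layout; its
// outputs become the `io` interface consumed by the fragment stage.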
let vertex_stage = {
let stage = &desc.vertex.stage;
let flag = wgt::ShaderStages::VERTEX;
let shader_module = shader_module_guard.get(stage.module).map_err(|_| {
pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error: validation::StageError::InvalidModule,
}
})?;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => {
let pipeline_layout = pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
Some(Device::get_introspection_bind_group_layouts(
pipeline_layout,
&*bgl_guard,
))
}
None => None,
};
if let Some(ref interface) = shader_module.interface {
io = interface
.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&mut shader_binding_sizes,
&stage.entry_point,
flag,
io,
desc.depth_stencil.as_ref().map(|d| d.depth_compare),
)
.map_err(|error| pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error,
})?;
validated_stages |= flag;
}
hal::ProgrammableStage {
module: &shader_module.raw,
entry_point: stage.entry_point.as_ref(),
}
};
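// Validate the fragment stage, if present, once the vertex stage has been
// validated.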
let fragment_stage = match desc.fragment {
Some(ref fragment) => {
let flag = wgt::ShaderStages::FRAGMENT;
let shader_module =
shader_module_guard
.get(fragment.stage.module)
.map_err(|_| pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error: validation::StageError::InvalidModule,
})?;
let provided_layouts = match desc.layout {
Some(pipeline_layout_id) => Some(Device::get_introspection_bind_group_layouts(
pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?,
&*bgl_guard,
)),
None => None,
};
if validated_stages == wgt::ShaderStages::VERTEX {
if let Some(ref interface) = shader_module.interface {
io = interface
.check_stage(
provided_layouts.as_ref().map(|p| p.as_slice()),
&mut derived_group_layouts,
&mut shader_binding_sizes,
&fragment.stage.entry_point,
flag,
io,
desc.depth_stencil.as_ref().map(|d| d.depth_compare),
)
.map_err(|error| pipeline::CreateRenderPipelineError::Stage {
stage: flag,
error,
})?;
validated_stages |= flag;
}
}
Some(hal::ProgrammableStage {
module: &shader_module.raw,
entry_point: fragment.stage.entry_point.as_ref(),
})
}
None => None,
};
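// Check that each fragment output is compatible with the corresponding
// color target; outputs with no matching target are ignored (with a log
// message).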
if validated_stages.contains(wgt::ShaderStages::FRAGMENT) {
for (i, output) in io.iter() {
match color_targets.get(*i as usize) {
Some(&Some(ref state)) => {
validation::check_texture_format(state.format, &output.ty).map_err(
|pipeline| {
pipeline::CreateRenderPipelineError::ColorState(
*i as u8,
pipeline::ColorStateError::IncompatibleFormat {
pipeline,
shader: output.ty,
},
)
},
)?;
}
_ => {
log::info!(
"The fragment stage {:?} output @location({}) values are ignored",
fragment_stage
.as_ref()
.map_or("", |stage| stage.entry_point),
i
);
}
}
}
}
let last_stage = match desc.fragment {
Some(_) => wgt::ShaderStages::FRAGMENT,
None => wgt::ShaderStages::VERTEX,
};
if desc.layout.is_none() && !validated_stages.contains(last_stage) {
return Err(pipeline::ImplicitLayoutError::ReflectionError(last_stage).into());
}
let pipeline_layout_id = match desc.layout {
Some(id) => id,
None => self.derive_pipeline_layout(
self_id,
implicit_context,
derived_group_layouts,
&mut *bgl_guard,
&mut *pipeline_layout_guard,
)?,
};
let layout = pipeline_layout_guard
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
if desc.multiview.is_some() {
self.require_features(wgt::Features::MULTIVIEW)?;
}
if !self
.downlevel
.flags
.contains(wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED)
{
for (binding, size) in shader_binding_sizes.iter() {
if size.get() % 16 != 0 {
return Err(pipeline::CreateRenderPipelineError::UnalignedShader {
binding: binding.binding,
group: binding.group,
size: size.get(),
});
}
}
}
let late_sized_buffer_groups =
Device::make_late_sized_buffer_groups(&shader_binding_sizes, layout, &*bgl_guard);
let pipeline_desc = hal::RenderPipelineDescriptor {
label: desc.label.borrow_option(),
layout: &layout.raw,
vertex_buffers: &vertex_buffers,
vertex_stage,
primitive: desc.primitive,
depth_stencil: desc.depth_stencil.clone(),
multisample: desc.multisample,
fragment_stage,
color_targets,
multiview: desc.multiview,
};
let raw =
unsafe { self.raw.create_render_pipeline(&pipeline_desc) }.map_err(
|err| match err {
hal::PipelineError::Device(error) => {
pipeline::CreateRenderPipelineError::Device(error.into())
}
hal::PipelineError::Linkage(stage, msg) => {
pipeline::CreateRenderPipelineError::Internal { stage, error: msg }
}
hal::PipelineError::EntryPoint(stage) => {
pipeline::CreateRenderPipelineError::Internal {
stage: hal::auxil::map_naga_stage(stage),
error: EP_FAILURE.to_string(),
}
}
},
)?;
let pass_context = RenderPassContext {
attachments: AttachmentData {
colors: color_targets
.iter()
.map(|state| state.as_ref().map(|s| s.format))
.collect(),
resolves: ArrayVec::new(),
depth_stencil: depth_stencil_state.as_ref().map(|state| state.format),
},
sample_count: samples,
multiview: desc.multiview,
};
let mut flags = pipeline::PipelineFlags::empty();
for state in color_targets.iter().filter_map(|s| s.as_ref()) {
if let Some(ref bs) = state.blend {
if bs.color.uses_constant() | bs.alpha.uses_constant() {
flags |= pipeline::PipelineFlags::BLEND_CONSTANT;
}
}
}
if let Some(ds) = depth_stencil_state.as_ref() {
if ds.stencil.is_enabled() && ds.stencil.needs_ref_value() {
flags |= pipeline::PipelineFlags::STENCIL_REFERENCE;
}
if !ds.is_depth_read_only() {
flags |= pipeline::PipelineFlags::WRITES_DEPTH;
}
if !ds.is_stencil_read_only(desc.primitive.cull_mode) {
flags |= pipeline::PipelineFlags::WRITES_STENCIL;
}
}
let pipeline = pipeline::RenderPipeline {
raw,
layout_id: Stored {
value: id::Valid(pipeline_layout_id),
ref_count: layout.life_guard.add_ref(),
},
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
pass_context,
flags,
strip_index_format: desc.primitive.strip_index_format,
vertex_steps,
late_sized_buffer_groups,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
};
Ok(pipeline)
}
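// Returns the supported features for a texture format: the adapter-specific
// set when TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES is enabled or the
// backend is not WebGPU-compliant, otherwise the spec-guaranteed set.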
fn describe_format_features(
&self,
adapter: &Adapter<A>,
format: TextureFormat,
) -> Result<wgt::TextureFormatFeatures, MissingFeatures> {
self.require_features(format.required_features())?;
let using_device_features = self
.features
.contains(wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES);
let downlevel = !self.downlevel.is_webgpu_compliant();
if using_device_features || downlevel {
Ok(adapter.get_texture_format_features(format))
} else {
Ok(format.guaranteed_format_features(self.features))
}
}
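// Blocks until the given submission has finished on the GPU, then triages
// the now-completed submissions.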
fn wait_for_submit(
&self,
submission_index: SubmissionIndex,
token: &mut Token<Self>,
) -> Result<(), WaitIdleError> {
let last_done_index = unsafe {
self.raw
.get_fence_value(&self.fence)
.map_err(DeviceError::from)?
};
if last_done_index < submission_index {
log::info!("Waiting for submission {:?}", submission_index);
unsafe {
self.raw
.wait(&self.fence, submission_index, !0)
.map_err(DeviceError::from)?
};
let closures = self
.lock_life(token)
.triage_submissions(submission_index, &self.command_allocator);
assert!(
closures.is_empty(),
"wait_for_submit is not expected to work with closures"
);
}
Ok(())
}
fn create_query_set(
&self,
self_id: id::DeviceId,
desc: &resource::QuerySetDescriptor,
) -> Result<resource::QuerySet<A>, resource::CreateQuerySetError> {
use resource::CreateQuerySetError as Error;
match desc.ty {
wgt::QueryType::Occlusion => {}
wgt::QueryType::Timestamp => {
self.require_features(wgt::Features::TIMESTAMP_QUERY)?;
}
wgt::QueryType::PipelineStatistics(..) => {
self.require_features(wgt::Features::PIPELINE_STATISTICS_QUERY)?;
}
}
if desc.count == 0 {
return Err(Error::ZeroCount);
}
if desc.count > wgt::QUERY_SET_MAX_QUERIES {
return Err(Error::TooManyQueries {
count: desc.count,
maximum: wgt::QUERY_SET_MAX_QUERIES,
});
}
let hal_desc = desc.map_label(super::LabelHelpers::borrow_option);
Ok(resource::QuerySet {
raw: unsafe { self.raw.create_query_set(&hal_desc).unwrap() },
device_id: Stored {
value: id::Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
life_guard: LifeGuard::new(""),
desc: desc.map_label(|_| ()),
})
}
}
impl<A: HalApi> Device<A> {
pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<A>) {
if let Some(raw) = buffer.raw {
unsafe {
self.raw.destroy_buffer(raw);
}
}
}
pub(crate) fn destroy_command_buffer(&self, cmd_buf: command::CommandBuffer<A>) {
let mut baked = cmd_buf.into_baked();
unsafe {
baked.encoder.reset_all(baked.list.into_iter());
}
unsafe {
self.raw.destroy_command_encoder(baked.encoder);
}
}
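// Called when the device is about to be dropped: waits (up to
// CLEANUP_WAIT_MS) for outstanding work and cleans up the life tracker.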
pub(crate) fn prepare_to_die(&mut self) {
self.pending_writes.deactivate();
let mut life_tracker = self.life_tracker.lock();
let current_index = self.active_submission_index;
if let Err(error) = unsafe { self.raw.wait(&self.fence, current_index, CLEANUP_WAIT_MS) } {
log::error!("failed to wait for the device: {:?}", error);
}
let _ = life_tracker.triage_submissions(current_index, &self.command_allocator);
life_tracker.cleanup(&self.raw);
#[cfg(feature = "trace")]
{
self.trace = None;
}
}
pub(crate) fn dispose(self) {
self.pending_writes.dispose(&self.raw);
self.command_allocator.into_inner().dispose(&self.raw);
unsafe {
self.raw.destroy_buffer(self.zero_buffer);
self.raw.destroy_fence(self.fence);
self.raw.exit(self.queue);
}
}
}
impl<A: HalApi> crate::hub::Resource for Device<A> {
const TYPE: &'static str = "Device";
fn life_guard(&self) -> &LifeGuard {
&self.life_guard
}
}
#[derive(Clone, Debug, Error)]
#[error("Device is invalid")]
pub struct InvalidDevice;
#[derive(Clone, Debug, Error)]
pub enum DeviceError {
#[error("Parent device is invalid")]
Invalid,
#[error("Parent device is lost")]
Lost,
#[error("Not enough memory left")]
OutOfMemory,
}
impl From<hal::DeviceError> for DeviceError {
fn from(error: hal::DeviceError) -> Self {
match error {
hal::DeviceError::Lost => DeviceError::Lost,
hal::DeviceError::OutOfMemory => DeviceError::OutOfMemory,
}
}
}
#[derive(Clone, Debug, Error)]
#[error("Features {0:?} are required but not enabled on the device")]
pub struct MissingFeatures(pub wgt::Features);
#[derive(Clone, Debug, Error)]
#[error(
"Downlevel flags {0:?} are required but not supported on the device.\n{}",
DOWNLEVEL_ERROR_MESSAGE
)]
pub struct MissingDownlevelFlags(pub wgt::DownlevelFlags);
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ImplicitPipelineContext {
pub root_id: id::PipelineLayoutId,
pub group_ids: ArrayVec<id::BindGroupLayoutId, { hal::MAX_BIND_GROUPS }>,
}
pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> {
pub root_id: Input<G, id::PipelineLayoutId>,
pub group_ids: &'a [Input<G, id::BindGroupLayoutId>],
}
impl<G: GlobalIdentityHandlerFactory> ImplicitPipelineIds<'_, G> {
fn prepare<A: HalApi>(self, hub: &Hub<A, G>) -> ImplicitPipelineContext {
ImplicitPipelineContext {
root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(),
group_ids: self
.group_ids
.iter()
.map(|id_in| hub.bind_group_layouts.prepare(id_in.clone()).into_id())
.collect(),
}
}
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn adapter_is_surface_supported<A: HalApi>(
&self,
adapter_id: id::AdapterId,
surface_id: id::SurfaceId,
) -> Result<bool, instance::IsSurfaceSupportedError> {
let hub = A::hub(self);
let mut token = Token::root();
let (surface_guard, mut token) = self.surfaces.read(&mut token);
let (adapter_guard, mut _token) = hub.adapters.read(&mut token);
let adapter = adapter_guard
.get(adapter_id)
.map_err(|_| instance::IsSurfaceSupportedError::InvalidAdapter)?;
let surface = surface_guard
.get(surface_id)
.map_err(|_| instance::IsSurfaceSupportedError::InvalidSurface)?;
Ok(adapter.is_surface_supported(surface))
}
pub fn surface_get_capabilities<A: HalApi>(
&self,
surface_id: id::SurfaceId,
adapter_id: id::AdapterId,
) -> Result<wgt::SurfaceCapabilities, instance::GetSurfaceSupportError> {
profiling::scope!("Surface::get_capabilities");
self.fetch_adapter_and_surface::<A, _, _>(surface_id, adapter_id, |adapter, surface| {
let mut hal_caps = surface.get_capabilities(adapter)?;
hal_caps.formats.sort_by_key(|f| !f.is_srgb());
Ok(wgt::SurfaceCapabilities {
formats: hal_caps.formats,
present_modes: hal_caps.present_modes,
alpha_modes: hal_caps.composite_alpha_modes,
})
})
}
fn fetch_adapter_and_surface<
A: HalApi,
F: FnOnce(&Adapter<A>, &Surface) -> Result<B, instance::GetSurfaceSupportError>,
B,
>(
&self,
surface_id: id::SurfaceId,
adapter_id: id::AdapterId,
get_supported_callback: F,
) -> Result<B, instance::GetSurfaceSupportError> {
let hub = A::hub(self);
let mut token = Token::root();
let (surface_guard, mut token) = self.surfaces.read(&mut token);
let (adapter_guard, mut _token) = hub.adapters.read(&mut token);
let adapter = adapter_guard
.get(adapter_id)
.map_err(|_| instance::GetSurfaceSupportError::InvalidAdapter)?;
let surface = surface_guard
.get(surface_id)
.map_err(|_| instance::GetSurfaceSupportError::InvalidSurface)?;
get_supported_callback(adapter, surface)
}
pub fn device_features<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<wgt::Features, InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
Ok(device.features)
}
pub fn device_limits<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<wgt::Limits, InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
Ok(device.limits.clone())
}
pub fn device_downlevel_properties<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<wgt::DownlevelCapabilities, InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
Ok(device.downlevel.clone())
}
pub fn device_create_buffer<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::BufferDescriptor,
id_in: Input<G, id::BufferId>,
) -> (id::BufferId, Option<resource::CreateBufferError>) {
profiling::scope!("Device::create_buffer");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.buffers.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
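// The `loop` runs at most once: error paths `break` out of it carrying the
// error value, while the success path returns early.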
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut desc = desc.clone();
let mapped_at_creation = mem::replace(&mut desc.mapped_at_creation, false);
if mapped_at_creation && !desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
desc.usage |= wgt::BufferUsages::COPY_DST;
}
trace
.lock()
.add(trace::Action::CreateBuffer(fid.id(), desc));
}
let mut buffer = match device.create_buffer(device_id, desc, false) {
Ok(buffer) => buffer,
Err(e) => break e,
};
let ref_count = buffer.life_guard.add_ref();
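// Pick the buffer's initial state: a buffer mapped at creation is either
// mapped directly (MAP_WRITE) or paired with a staging buffer whose
// contents are copied over when the buffer is unmapped.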
let buffer_use = if !desc.mapped_at_creation {
hal::BufferUses::empty()
} else if desc.usage.contains(wgt::BufferUsages::MAP_WRITE) {
let map_size = buffer.size;
let ptr = if map_size == 0 {
std::ptr::NonNull::dangling()
} else {
match map_buffer(&device.raw, &mut buffer, 0, map_size, HostMap::Write) {
Ok(ptr) => ptr,
Err(e) => {
let raw = buffer.raw.unwrap();
device.lock_life(&mut token).schedule_resource_destruction(
queue::TempResource::Buffer(raw),
!0,
);
break e.into();
}
}
};
buffer.map_state = resource::BufferMapState::Active {
ptr,
range: 0..map_size,
host: HostMap::Write,
};
hal::BufferUses::MAP_WRITE
} else {
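// The buffer is not host-writable: stage the initial contents in a
// CPU-visible staging buffer instead.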
let stage_desc = wgt::BufferDescriptor {
label: Some(Cow::Borrowed(
"(wgpu internal) initializing unmappable buffer",
)),
size: desc.size,
usage: wgt::BufferUsages::MAP_WRITE | wgt::BufferUsages::COPY_SRC,
mapped_at_creation: false,
};
let mut stage = match device.create_buffer(device_id, &stage_desc, true) {
Ok(stage) => stage,
Err(e) => {
let raw = buffer.raw.unwrap();
device
.lock_life(&mut token)
.schedule_resource_destruction(queue::TempResource::Buffer(raw), !0);
break e;
}
};
let stage_buffer = stage.raw.unwrap();
let mapping = match unsafe { device.raw.map_buffer(&stage_buffer, 0..stage.size) } {
Ok(mapping) => mapping,
Err(e) => {
let raw = buffer.raw.unwrap();
let mut life_lock = device.lock_life(&mut token);
life_lock
.schedule_resource_destruction(queue::TempResource::Buffer(raw), !0);
life_lock.schedule_resource_destruction(
queue::TempResource::Buffer(stage_buffer),
!0,
);
break DeviceError::from(e).into();
}
};
assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
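// Zero-initialize the staging memory and mark both buffers as initialized;
// this is guaranteed to hold by the time the buffer becomes usable.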
unsafe { ptr::write_bytes(mapping.ptr.as_ptr(), 0, buffer.size as usize) };
buffer.initialization_status.drain(0..buffer.size);
stage.initialization_status.drain(0..buffer.size);
buffer.map_state = resource::BufferMapState::Init {
ptr: mapping.ptr,
needs_flush: !mapping.is_coherent,
stage_buffer,
};
hal::BufferUses::COPY_DST
};
let id = fid.assign(buffer, &mut token);
log::info!("Created buffer {:?} with {:?}", id, desc);
device
.trackers
.lock()
.buffers
.insert_single(id, ref_count, buffer_use);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn create_buffer_error<A: HalApi>(&self, id_in: Input<G, id::BufferId>, label: Label) {
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.buffers.prepare(id_in);
let (_, mut token) = hub.devices.read(&mut token);
fid.assign_error(label.borrow_or_default(), &mut token);
}
pub fn create_texture_error<A: HalApi>(&self, id_in: Input<G, id::TextureId>, label: Label) {
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.textures.prepare(id_in);
let (_, mut token) = hub.devices.read(&mut token);
fid.assign_error(label.borrow_or_default(), &mut token);
}
#[cfg(feature = "replay")]
pub fn device_wait_for_buffer<A: HalApi>(
&self,
device_id: id::DeviceId,
buffer_id: id::BufferId,
) -> Result<(), WaitIdleError> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let last_submission = {
let (buffer_guard, _) = hub.buffers.write(&mut token);
match buffer_guard.get(buffer_id) {
Ok(buffer) => buffer.life_guard.life_count(),
Err(_) => return Ok(()),
}
};
device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?
.wait_for_submit(last_submission, &mut token)
}
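// Hidden from docs: synchronously maps a MAP_WRITE buffer, writes `data`,
// flushes non-coherent mappings, and unmaps.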
#[doc(hidden)]
pub fn device_set_buffer_sub_data<A: HalApi>(
&self,
device_id: id::DeviceId,
buffer_id: id::BufferId,
offset: BufferAddress,
data: &[u8],
) -> BufferAccessResult {
profiling::scope!("Device::set_buffer_sub_data");
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let device = device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(trace::Action::WriteBuffer {
id: buffer_id,
data: data_path,
range: offset..offset + data.len() as BufferAddress,
queued: false,
});
}
let raw_buf = buffer.raw.as_ref().unwrap();
unsafe {
let mapping = device
.raw
.map_buffer(raw_buf, offset..offset + data.len() as u64)
.map_err(DeviceError::from)?;
ptr::copy_nonoverlapping(data.as_ptr(), mapping.ptr.as_ptr(), data.len());
if !mapping.is_coherent {
device
.raw
.flush_mapped_ranges(raw_buf, iter::once(offset..offset + data.len() as u64));
}
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
}
Ok(())
}
#[doc(hidden)]
pub fn device_get_buffer_sub_data<A: HalApi>(
&self,
device_id: id::DeviceId,
buffer_id: id::BufferId,
offset: BufferAddress,
data: &mut [u8],
) -> BufferAccessResult {
profiling::scope!("Device::get_buffer_sub_data");
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let device = device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?;
let raw_buf = buffer.raw.as_ref().unwrap();
unsafe {
let mapping = device
.raw
.map_buffer(raw_buf, offset..offset + data.len() as u64)
.map_err(DeviceError::from)?;
if !mapping.is_coherent {
device.raw.invalidate_mapped_ranges(
raw_buf,
iter::once(offset..offset + data.len() as u64),
);
}
ptr::copy_nonoverlapping(mapping.ptr.as_ptr(), data.as_mut_ptr(), data.len());
device
.raw
.unmap_buffer(raw_buf)
.map_err(DeviceError::from)?;
}
Ok(())
}
pub fn buffer_label<A: HalApi>(&self, id: id::BufferId) -> String {
A::hub(self).buffers.label_for_resource(id)
}
pub fn buffer_destroy<A: HalApi>(
&self,
buffer_id: id::BufferId,
) -> Result<(), resource::DestroyError> {
profiling::scope!("Buffer::destroy");
let map_closure;
{
let hub = A::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
log::info!("Buffer {:?} is destroyed", buffer_id);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value];
map_closure = match &buffer.map_state {
&BufferMapState::Waiting(..)
| &BufferMapState::Init { .. }
| &BufferMapState::Active { .. } => {
self.buffer_unmap_inner(buffer_id, buffer, device)
.unwrap_or(None)
}
_ => None,
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeBuffer(buffer_id));
}
let raw = buffer
.raw
.take()
.ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Buffer(raw);
if device.pending_writes.dst_buffers.contains(&buffer_id) {
device.pending_writes.temp_resources.push(temp);
} else {
let last_submit_index = buffer.life_guard.life_count();
drop(buffer_guard);
device
.lock_life(&mut token)
.schedule_resource_destruction(temp, last_submit_index);
}
}
if let Some((operation, status)) = map_closure {
operation.callback.call(status);
}
Ok(())
}
pub fn buffer_drop<A: HalApi>(&self, buffer_id: id::BufferId, wait: bool) {
profiling::scope!("Buffer::drop");
log::debug!("buffer {:?} is dropped", buffer_id);
let hub = A::hub(self);
let mut token = Token::root();
let (ref_count, last_submit_index, device_id) = {
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
match buffer_guard.get_mut(buffer_id) {
Ok(buffer) => {
let ref_count = buffer.life_guard.ref_count.take().unwrap();
let last_submit_index = buffer.life_guard.life_count();
(ref_count, last_submit_index, buffer.device_id.value)
}
Err(InvalidId) => {
hub.buffers.unregister_locked(buffer_id, &mut *buffer_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
{
let mut life_lock = device.lock_life(&mut token);
if device.pending_writes.dst_buffers.contains(&buffer_id) {
life_lock.future_suspected_buffers.push(Stored {
value: id::Valid(buffer_id),
ref_count,
});
} else {
drop(ref_count);
life_lock
.suspected_resources
.buffers
.push(id::Valid(buffer_id));
}
}
if wait {
match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (),
Err(e) => log::error!("Failed to wait for buffer {:?}: {:?}", buffer_id, e),
}
}
}
pub fn device_create_texture<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::TextureDescriptor,
id_in: Input<G, id::TextureId>,
) -> (id::TextureId, Option<resource::CreateTextureError>) {
profiling::scope!("Device::create_texture");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.textures.prepare(id_in);
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateTexture(fid.id(), desc.clone()));
}
let adapter = &adapter_guard[device.adapter_id.value];
let texture = match device.create_texture(device_id, adapter, desc) {
Ok(texture) => texture,
Err(error) => break error,
};
let ref_count = texture.life_guard.add_ref();
let id = fid.assign(texture, &mut token);
log::info!("Created texture {:?} with {:?}", id, desc);
device.trackers.lock().textures.insert_single(
id.0,
ref_count,
hal::TextureUses::UNINITIALIZED,
);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub unsafe fn create_texture_from_hal<A: HalApi>(
&self,
hal_texture: A::Texture,
device_id: id::DeviceId,
desc: &resource::TextureDescriptor,
id_in: Input<G, id::TextureId>,
) -> (id::TextureId, Option<resource::CreateTextureError>) {
profiling::scope!("Device::create_texture");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.textures.prepare(id_in);
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateTexture(fid.id(), desc.clone()));
}
let adapter = &adapter_guard[device.adapter_id.value];
let format_features = match device
.describe_format_features(adapter, desc.format)
.map_err(|error| resource::CreateTextureError::MissingFeatures(desc.format, error))
{
Ok(features) => features,
Err(error) => break error,
};
let mut texture = device.create_texture_from_hal(
hal_texture,
conv::map_texture_usage(desc.usage, desc.format.into()),
device_id,
desc,
format_features,
resource::TextureClearMode::None,
);
if desc.usage.contains(wgt::TextureUsages::COPY_DST) {
texture.hal_usage |= hal::TextureUses::COPY_DST;
}
texture.initialization_status = TextureInitTracker::new(desc.mip_level_count, 0);
let ref_count = texture.life_guard.add_ref();
let id = fid.assign(texture, &mut token);
log::info!("Created texture {:?} with {:?}", id, desc);
device.trackers.lock().textures.insert_single(
id.0,
ref_count,
hal::TextureUses::UNINITIALIZED,
);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn texture_label<A: HalApi>(&self, id: id::TextureId) -> String {
A::hub(self).textures.label_for_resource(id)
}
pub fn texture_destroy<A: HalApi>(
&self,
texture_id: id::TextureId,
) -> Result<(), resource::DestroyError> {
profiling::scope!("Texture::destroy");
let hub = A::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
log::info!("Buffer {:?} is destroyed", texture_id);
let (mut texture_guard, _) = hub.textures.write(&mut token);
let texture = texture_guard
.get_mut(texture_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let device = &mut device_guard[texture.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::FreeTexture(texture_id));
}
let last_submit_index = texture.life_guard.life_count();
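// Take ownership of any clear views used for texture initialization so they
// are destroyed together with the raw texture.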
let clear_views =
match std::mem::replace(&mut texture.clear_mode, resource::TextureClearMode::None) {
resource::TextureClearMode::BufferCopy => SmallVec::new(),
resource::TextureClearMode::RenderPass { clear_views, .. } => clear_views,
resource::TextureClearMode::None => SmallVec::new(),
};
match texture.inner {
resource::TextureInner::Native { ref mut raw } => {
let raw = raw.take().ok_or(resource::DestroyError::AlreadyDestroyed)?;
let temp = queue::TempResource::Texture(raw, clear_views);
if device.pending_writes.dst_textures.contains(&texture_id) {
device.pending_writes.temp_resources.push(temp);
} else {
drop(texture_guard);
device
.lock_life(&mut token)
.schedule_resource_destruction(temp, last_submit_index);
}
}
resource::TextureInner::Surface { .. } => {
for clear_view in clear_views {
unsafe {
device.raw.destroy_texture_view(clear_view);
}
}
}
}
Ok(())
}
pub fn texture_drop<A: HalApi>(&self, texture_id: id::TextureId, wait: bool) {
profiling::scope!("Texture::drop");
log::debug!("texture {:?} is dropped", texture_id);
let hub = A::hub(self);
let mut token = Token::root();
let (ref_count, last_submit_index, device_id) = {
let (mut texture_guard, _) = hub.textures.write(&mut token);
match texture_guard.get_mut(texture_id) {
Ok(texture) => {
let ref_count = texture.life_guard.ref_count.take().unwrap();
let last_submit_index = texture.life_guard.life_count();
(ref_count, last_submit_index, texture.device_id.value)
}
Err(InvalidId) => {
hub.textures
.unregister_locked(texture_id, &mut *texture_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
{
let mut life_lock = device.lock_life(&mut token);
if device.pending_writes.dst_textures.contains(&texture_id) {
life_lock.future_suspected_textures.push(Stored {
value: id::Valid(texture_id),
ref_count,
});
} else {
drop(ref_count);
life_lock
.suspected_resources
.textures
.push(id::Valid(texture_id));
}
}
if wait {
match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (),
Err(e) => log::error!("Failed to wait for texture {:?}: {:?}", texture_id, e),
}
}
}
pub fn texture_create_view<A: HalApi>(
&self,
texture_id: id::TextureId,
desc: &resource::TextureViewDescriptor,
id_in: Input<G, id::TextureViewId>,
) -> (id::TextureViewId, Option<resource::CreateTextureViewError>) {
profiling::scope!("Texture::create_view");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.texture_views.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token);
let error = loop {
let texture = match texture_guard.get(texture_id) {
Ok(texture) => texture,
Err(_) => break resource::CreateTextureViewError::InvalidTexture,
};
let device = &device_guard[texture.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateTextureView {
id: fid.id(),
parent_id: texture_id,
desc: desc.clone(),
});
}
let view = match device.create_texture_view(texture, texture_id, desc) {
Ok(view) => view,
Err(e) => break e,
};
let ref_count = view.life_guard.add_ref();
let id = fid.assign(view, &mut token);
device.trackers.lock().views.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn texture_view_label<A: HalApi>(&self, id: id::TextureViewId) -> String {
A::hub(self).texture_views.label_for_resource(id)
}
pub fn texture_view_drop<A: HalApi>(
&self,
texture_view_id: id::TextureViewId,
wait: bool,
) -> Result<(), resource::TextureViewDestroyError> {
profiling::scope!("TextureView::drop");
log::debug!("texture view {:?} is dropped", texture_view_id);
let hub = A::hub(self);
let mut token = Token::root();
let (last_submit_index, device_id) = {
let (mut texture_view_guard, _) = hub.texture_views.write(&mut token);
match texture_view_guard.get_mut(texture_view_id) {
Ok(view) => {
let _ref_count = view.life_guard.ref_count.take();
let last_submit_index = view.life_guard.life_count();
(last_submit_index, view.device_id.value)
}
Err(InvalidId) => {
hub.texture_views
.unregister_locked(texture_view_id, &mut *texture_view_guard);
return Ok(());
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
device
.lock_life(&mut token)
.suspected_resources
.texture_views
.push(id::Valid(texture_view_id));
if wait {
match device.wait_for_submit(last_submit_index, &mut token) {
Ok(()) => (),
Err(e) => log::error!(
"Failed to wait for texture view {:?}: {:?}",
texture_view_id,
e
),
}
}
Ok(())
}
pub fn device_create_sampler<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::SamplerDescriptor,
id_in: Input<G, id::SamplerId>,
) -> (id::SamplerId, Option<resource::CreateSamplerError>) {
profiling::scope!("Device::create_sampler");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.samplers.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateSampler(fid.id(), desc.clone()));
}
let sampler = match device.create_sampler(device_id, desc) {
Ok(sampler) => sampler,
Err(e) => break e,
};
let ref_count = sampler.life_guard.add_ref();
let id = fid.assign(sampler, &mut token);
device.trackers.lock().samplers.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn sampler_label<A: HalApi>(&self, id: id::SamplerId) -> String {
A::hub(self).samplers.label_for_resource(id)
}
pub fn sampler_drop<A: HalApi>(&self, sampler_id: id::SamplerId) {
profiling::scope!("Sampler::drop");
log::debug!("sampler {:?} is dropped", sampler_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut sampler_guard, _) = hub.samplers.write(&mut token);
match sampler_guard.get_mut(sampler_id) {
Ok(sampler) => {
sampler.life_guard.ref_count.take();
sampler.device_id.value
}
Err(InvalidId) => {
hub.samplers
.unregister_locked(sampler_id, &mut *sampler_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.samplers
.push(id::Valid(sampler_id));
}
pub fn device_create_bind_group_layout<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &binding_model::BindGroupLayoutDescriptor,
id_in: Input<G, id::BindGroupLayoutId>,
) -> (
id::BindGroupLayoutId,
Option<binding_model::CreateBindGroupLayoutError>,
) {
profiling::scope!("Device::create_bind_group_layout");
let mut token = Token::root();
let hub = A::hub(self);
let fid = hub.bind_group_layouts.prepare(id_in);
let error = 'outer: loop {
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone()));
}
let mut entry_map = FastHashMap::default();
for entry in desc.entries.iter() {
if entry.binding > device.limits.max_bindings_per_bind_group {
break 'outer binding_model::CreateBindGroupLayoutError::InvalidBindingIndex {
binding: entry.binding,
maximum: device.limits.max_bindings_per_bind_group,
};
}
if entry_map.insert(entry.binding, *entry).is_some() {
break 'outer binding_model::CreateBindGroupLayoutError::ConflictBinding(
entry.binding,
);
}
}
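// If the identity inputs are zero-sized (IDs are generated internally), an
// equivalent existing bind group layout can be reused by bumping its
// refcount instead of creating a duplicate.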
if mem::size_of::<Input<G, id::BindGroupLayoutId>>() == 0 {
let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
if let Some(id) =
Device::deduplicate_bind_group_layout(device_id, &entry_map, &*bgl_guard)
{
return (id, None);
}
}
let layout = match device.create_bind_group_layout(
device_id,
desc.label.borrow_option(),
entry_map,
) {
Ok(layout) => layout,
Err(e) => break e,
};
let id = fid.assign(layout, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn bind_group_layout_label<A: HalApi>(&self, id: id::BindGroupLayoutId) -> String {
A::hub(self).bind_group_layouts.label_for_resource(id)
}
pub fn bind_group_layout_drop<A: HalApi>(&self, bind_group_layout_id: id::BindGroupLayoutId) {
profiling::scope!("BindGroupLayout::drop");
log::debug!("bind group layout {:?} is dropped", bind_group_layout_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut bind_group_layout_guard, _) = hub.bind_group_layouts.write(&mut token);
match bind_group_layout_guard.get_mut(bind_group_layout_id) {
Ok(layout) => layout.device_id.value,
Err(InvalidId) => {
hub.bind_group_layouts
.unregister_locked(bind_group_layout_id, &mut *bind_group_layout_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.bind_group_layouts
.push(id::Valid(bind_group_layout_id));
}
pub fn device_create_pipeline_layout<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &binding_model::PipelineLayoutDescriptor,
id_in: Input<G, id::PipelineLayoutId>,
) -> (
id::PipelineLayoutId,
Option<binding_model::CreatePipelineLayoutError>,
) {
profiling::scope!("Device::create_pipeline_layout");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.pipeline_layouts.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreatePipelineLayout(fid.id(), desc.clone()));
}
let layout = {
let (bgl_guard, _) = hub.bind_group_layouts.read(&mut token);
match device.create_pipeline_layout(device_id, desc, &*bgl_guard) {
Ok(layout) => layout,
Err(e) => break e,
}
};
let id = fid.assign(layout, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn pipeline_layout_label<A: HalApi>(&self, id: id::PipelineLayoutId) -> String {
A::hub(self).pipeline_layouts.label_for_resource(id)
}
pub fn pipeline_layout_drop<A: HalApi>(&self, pipeline_layout_id: id::PipelineLayoutId) {
profiling::scope!("PipelineLayout::drop");
log::debug!("pipeline layout {:?} is dropped", pipeline_layout_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_id, ref_count) = {
let (mut pipeline_layout_guard, _) = hub.pipeline_layouts.write(&mut token);
match pipeline_layout_guard.get_mut(pipeline_layout_id) {
Ok(layout) => (
layout.device_id.value,
layout.life_guard.ref_count.take().unwrap(),
),
Err(InvalidId) => {
hub.pipeline_layouts
.unregister_locked(pipeline_layout_id, &mut *pipeline_layout_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.pipeline_layouts
.push(Stored {
value: id::Valid(pipeline_layout_id),
ref_count,
});
}
pub fn device_create_bind_group<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &binding_model::BindGroupDescriptor,
id_in: Input<G, id::BindGroupId>,
) -> (id::BindGroupId, Option<binding_model::CreateBindGroupError>) {
profiling::scope!("Device::create_bind_group");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.bind_groups.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let (bind_group_layout_guard, mut token) = hub.bind_group_layouts.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::CreateBindGroup(fid.id(), desc.clone()));
}
let bind_group_layout = match bind_group_layout_guard.get(desc.layout) {
Ok(layout) => layout,
Err(_) => break binding_model::CreateBindGroupError::InvalidLayout,
};
let bind_group =
match device.create_bind_group(device_id, bind_group_layout, desc, hub, &mut token)
{
Ok(bind_group) => bind_group,
Err(e) => break e,
};
let ref_count = bind_group.life_guard.add_ref();
let id = fid.assign(bind_group, &mut token);
log::debug!("Bind group {:?}", id,);
device
.trackers
.lock()
.bind_groups
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn bind_group_label<A: HalApi>(&self, id: id::BindGroupId) -> String {
A::hub(self).bind_groups.label_for_resource(id)
}
pub fn bind_group_drop<A: HalApi>(&self, bind_group_id: id::BindGroupId) {
profiling::scope!("BindGroup::drop");
log::debug!("bind group {:?} is dropped", bind_group_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut bind_group_guard, _) = hub.bind_groups.write(&mut token);
match bind_group_guard.get_mut(bind_group_id) {
Ok(bind_group) => {
bind_group.life_guard.ref_count.take();
bind_group.device_id.value
}
Err(InvalidId) => {
hub.bind_groups
.unregister_locked(bind_group_id, &mut *bind_group_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.bind_groups
.push(id::Valid(bind_group_id));
}
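/// Creates a shader module from WGSL or Naga IR. With tracing enabled,
/// the source is serialized into the trace before compilation.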
pub fn device_create_shader_module<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor,
source: pipeline::ShaderModuleSource,
id_in: Input<G, id::ShaderModuleId>,
) -> (
id::ShaderModuleId,
Option<pipeline::CreateShaderModuleError>,
) {
profiling::scope!("Device::create_shader_module");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.shader_modules.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data = match source {
#[cfg(feature = "wgsl")]
pipeline::ShaderModuleSource::Wgsl(ref code) => {
trace.make_binary("wgsl", code.as_bytes())
}
pipeline::ShaderModuleSource::Naga(ref module) => {
let string =
ron::ser::to_string_pretty(module, ron::ser::PrettyConfig::default())
.unwrap();
trace.make_binary("ron", string.as_bytes())
}
pipeline::ShaderModuleSource::Dummy(_) => {
panic!("found `ShaderModuleSource::Dummy`")
}
};
trace.add(trace::Action::CreateShaderModule {
id: fid.id(),
desc: desc.clone(),
data,
});
}
let shader = match device.create_shader_module(device_id, desc, source) {
Ok(shader) => shader,
Err(e) => break e,
};
let id = fid.assign(shader, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
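/// Creates a shader module directly from SPIR-V words.
///
/// # Safety
///
/// The SPIR-V is handed to the backend without Naga validation, so the
/// caller must ensure it is valid for the device's capabilities.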
#[allow(unused_unsafe)]
pub unsafe fn device_create_shader_module_spirv<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::ShaderModuleDescriptor,
source: Cow<[u32]>,
id_in: Input<G, id::ShaderModuleId>,
) -> (
id::ShaderModuleId,
Option<pipeline::CreateShaderModuleError>,
) {
profiling::scope!("Device::create_shader_module");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.shader_modules.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data = trace.make_binary("spv", unsafe {
std::slice::from_raw_parts(source.as_ptr() as *const u8, source.len() * 4)
});
trace.add(trace::Action::CreateShaderModule {
id: fid.id(),
desc: desc.clone(),
data,
});
}
let shader =
match unsafe { device.create_shader_module_spirv(device_id, desc, &source) } {
Ok(shader) => shader,
Err(e) => break e,
};
let id = fid.assign(shader, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn shader_module_label<A: HalApi>(&self, id: id::ShaderModuleId) -> String {
A::hub(self).shader_modules.label_for_resource(id)
}
pub fn shader_module_drop<A: HalApi>(&self, shader_module_id: id::ShaderModuleId) {
profiling::scope!("ShaderModule::drop");
log::debug!("shader module {:?} is dropped", shader_module_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (module, _) = hub.shader_modules.unregister(shader_module_id, &mut token);
if let Some(module) = module {
let device = &device_guard[module.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::DestroyShaderModule(shader_module_id));
}
unsafe {
device.raw.destroy_shader_module(module.raw);
}
}
}
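/// Creates a command encoder by acquiring a raw encoder from the device's
/// command allocator and wrapping it in a new `CommandBuffer`.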
pub fn device_create_command_encoder<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &wgt::CommandEncoderDescriptor<Label>,
id_in: Input<G, id::CommandEncoderId>,
) -> (id::CommandEncoderId, Option<DeviceError>) {
profiling::scope!("Device::create_command_encoder");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.command_buffers.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid,
};
let dev_stored = Stored {
value: id::Valid(device_id),
ref_count: device.life_guard.add_ref(),
};
let encoder = match device
.command_allocator
.lock()
.acquire_encoder(&device.raw, &device.queue)
{
Ok(raw) => raw,
Err(_) => break DeviceError::OutOfMemory,
};
let command_buffer = command::CommandBuffer::new(
encoder,
dev_stored,
device.limits.clone(),
device.downlevel.clone(),
device.features,
#[cfg(feature = "trace")]
device.trace.is_some(),
&desc.label,
);
let id = fid.assign(command_buffer, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn command_buffer_label<A: HalApi>(&self, id: id::CommandBufferId) -> String {
A::hub(self).command_buffers.label_for_resource(id)
}
pub fn command_encoder_drop<A: HalApi>(&self, command_encoder_id: id::CommandEncoderId) {
profiling::scope!("CommandEncoder::drop");
log::debug!("command encoder {:?} is dropped", command_encoder_id);
let hub = A::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (cmdbuf, _) = hub
.command_buffers
.unregister(command_encoder_id, &mut token);
if let Some(cmdbuf) = cmdbuf {
let device = &mut device_guard[cmdbuf.device_id.value];
device.untrack::<G>(hub, &cmdbuf.trackers, &mut token);
device.destroy_command_buffer(cmdbuf);
}
}
pub fn command_buffer_drop<A: HalApi>(&self, command_buffer_id: id::CommandBufferId) {
profiling::scope!("CommandBuffer::drop");
log::debug!("command buffer {:?} is dropped", command_buffer_id);
self.command_encoder_drop::<A>(command_buffer_id)
}
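/// Creates a render bundle encoder. The encoder is not hub-managed, so it
/// is handed back as a raw pointer to a boxed value that the caller owns.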
pub fn device_create_render_bundle_encoder(
&self,
device_id: id::DeviceId,
desc: &command::RenderBundleEncoderDescriptor,
) -> (
id::RenderBundleEncoderId,
Option<command::CreateRenderBundleError>,
) {
profiling::scope!("Device::create_render_bundle_encoder");
let (encoder, error) = match command::RenderBundleEncoder::new(desc, device_id, None) {
Ok(encoder) => (encoder, None),
Err(e) => (command::RenderBundleEncoder::dummy(device_id), Some(e)),
};
(Box::into_raw(Box::new(encoder)), error)
}
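/// Finishes `bundle_encoder`, validating and baking its recorded commands
/// into a render bundle registered under the prepared id.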
pub fn render_bundle_encoder_finish<A: HalApi>(
&self,
bundle_encoder: command::RenderBundleEncoder,
desc: &command::RenderBundleDescriptor,
id_in: Input<G, id::RenderBundleId>,
) -> (id::RenderBundleId, Option<command::RenderBundleError>) {
profiling::scope!("RenderBundleEncoder::finish");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.render_bundles.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(bundle_encoder.parent()) {
Ok(device) => device,
Err(_) => break command::RenderBundleError::INVALID_DEVICE,
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateRenderBundle {
id: fid.id(),
desc: trace::new_render_bundle_encoder_descriptor(
desc.label.clone(),
&bundle_encoder.context,
bundle_encoder.is_depth_read_only,
bundle_encoder.is_stencil_read_only,
),
base: bundle_encoder.to_base_pass(),
});
}
let render_bundle = match bundle_encoder.finish(desc, device, hub, &mut token) {
Ok(bundle) => bundle,
Err(e) => break e,
};
log::debug!("Render bundle");
let ref_count = render_bundle.life_guard.add_ref();
let id = fid.assign(render_bundle, &mut token);
device.trackers.lock().bundles.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
pub fn render_bundle_label<A: HalApi>(&self, id: id::RenderBundleId) -> String {
A::hub(self).render_bundles.label_for_resource(id)
}
pub fn render_bundle_drop<A: HalApi>(&self, render_bundle_id: id::RenderBundleId) {
profiling::scope!("RenderBundle::drop");
log::debug!("render bundle {:?} is dropped", render_bundle_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let device_id = {
let (mut bundle_guard, _) = hub.render_bundles.write(&mut token);
match bundle_guard.get_mut(render_bundle_id) {
Ok(bundle) => {
bundle.life_guard.ref_count.take();
bundle.device_id.value
}
Err(InvalidId) => {
hub.render_bundles
.unregister_locked(render_bundle_id, &mut *bundle_guard);
return;
}
}
};
device_guard[device_id]
.lock_life(&mut token)
.suspected_resources
.render_bundles
.push(id::Valid(render_bundle_id));
}
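/// Creates a query set from `desc` and inserts it into the device's
/// trackers.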
pub fn device_create_query_set<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &resource::QuerySetDescriptor,
id_in: Input<G, id::QuerySetId>,
) -> (id::QuerySetId, Option<resource::CreateQuerySetError>) {
profiling::scope!("Device::create_query_set");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.query_sets.prepare(id_in);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateQuerySet {
id: fid.id(),
desc: desc.clone(),
});
}
let query_set = match device.create_query_set(device_id, desc) {
Ok(query_set) => query_set,
Err(err) => break err,
};
let ref_count = query_set.life_guard.add_ref();
let id = fid.assign(query_set, &mut token);
device
.trackers
.lock()
.query_sets
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error("", &mut token);
(id, Some(error))
}
pub fn query_set_drop<A: HalApi>(&self, query_set_id: id::QuerySetId) {
profiling::scope!("QuerySet::drop");
log::debug!("query set {:?} is dropped", query_set_id);
let hub = A::hub(self);
let mut token = Token::root();
let device_id = {
let (mut query_set_guard, _) = hub.query_sets.write(&mut token);
match query_set_guard.get_mut(query_set_id) {
Ok(query_set) => {
query_set.life_guard.ref_count.take();
query_set.device_id.value
}
Err(InvalidId) => {
hub.query_sets
.unregister_locked(query_set_id, &mut *query_set_guard);
return;
}
}
};
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = &device_guard[device_id];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::DestroyQuerySet(query_set_id));
}
device
.lock_life(&mut token)
.suspected_resources
.query_sets
.push(id::Valid(query_set_id));
}
pub fn query_set_label<A: HalApi>(&self, id: id::QuerySetId) -> String {
A::hub(self).query_sets.label_for_resource(id)
}
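/// Creates a render pipeline from `desc`. When `implicit_pipeline_ids` is
/// supplied, the pipeline layout and its bind group layouts are derived
/// from the shaders and registered under those pre-allocated ids.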
pub fn device_create_render_pipeline<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::RenderPipelineDescriptor,
id_in: Input<G, id::RenderPipelineId>,
implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
) -> (
id::RenderPipelineId,
Option<pipeline::CreateRenderPipelineError>,
) {
profiling::scope!("Device::create_render_pipeline");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.render_pipelines.prepare(id_in);
let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
let adapter = &adapter_guard[device.adapter_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateRenderPipeline {
id: fid.id(),
desc: desc.clone(),
implicit_context: implicit_context.clone(),
});
}
let pipeline = match device.create_render_pipeline(
device_id,
adapter,
desc,
implicit_context,
hub,
&mut token,
) {
Ok(pair) => pair,
Err(e) => break e,
};
let ref_count = pipeline.life_guard.add_ref();
let id = fid.assign(pipeline, &mut token);
log::info!("Created render pipeline {:?} with {:?}", id, desc);
device
.trackers
.lock()
.render_pipelines
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
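/// Returns the bind group layout at `index` in the pipeline's layout,
/// bumping its multi-reference count. `id_in` is consumed only to register
/// an error id when the lookup fails.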
pub fn render_pipeline_get_bind_group_layout<A: HalApi>(
&self,
pipeline_id: id::RenderPipelineId,
index: u32,
id_in: Input<G, id::BindGroupLayoutId>,
) -> (
id::BindGroupLayoutId,
Option<binding_model::GetBindGroupLayoutError>,
) {
let hub = A::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let error = loop {
let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
let (_, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, _) = hub.render_pipelines.read(&mut token);
let pipeline = match pipeline_guard.get(pipeline_id) {
Ok(pipeline) => pipeline,
Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
};
let id = match pipeline_layout_guard[pipeline.layout_id.value]
.bind_group_layout_ids
.get(index as usize)
{
Some(id) => id,
None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
};
bgl_guard[*id].multi_ref_count.inc();
return (id.0, None);
};
let id = hub
.bind_group_layouts
.prepare(id_in)
.assign_error("<derived>", &mut token);
(id, Some(error))
}
pub fn render_pipeline_label<A: HalApi>(&self, id: id::RenderPipelineId) -> String {
A::hub(self).render_pipelines.label_for_resource(id)
}
pub fn render_pipeline_drop<A: HalApi>(&self, render_pipeline_id: id::RenderPipelineId) {
profiling::scope!("RenderPipeline::drop");
log::debug!("render pipeline {:?} is dropped", render_pipeline_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (device_id, layout_id) = {
let (mut pipeline_guard, _) = hub.render_pipelines.write(&mut token);
match pipeline_guard.get_mut(render_pipeline_id) {
Ok(pipeline) => {
pipeline.life_guard.ref_count.take();
(pipeline.device_id.value, pipeline.layout_id.clone())
}
Err(InvalidId) => {
hub.render_pipelines
.unregister_locked(render_pipeline_id, &mut *pipeline_guard);
return;
}
}
};
let mut life_lock = device_guard[device_id].lock_life(&mut token);
life_lock
.suspected_resources
.render_pipelines
.push(id::Valid(render_pipeline_id));
life_lock
.suspected_resources
.pipeline_layouts
.push(layout_id);
}
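/// Creates a compute pipeline from `desc`; as with render pipelines, the
/// layout may be derived implicitly via `implicit_pipeline_ids`.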
pub fn device_create_compute_pipeline<A: HalApi>(
&self,
device_id: id::DeviceId,
desc: &pipeline::ComputePipelineDescriptor,
id_in: Input<G, id::ComputePipelineId>,
implicit_pipeline_ids: Option<ImplicitPipelineIds<G>>,
) -> (
id::ComputePipelineId,
Option<pipeline::CreateComputePipelineError>,
) {
profiling::scope!("Device::create_compute_pipeline");
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.compute_pipelines.prepare(id_in);
let implicit_context = implicit_pipeline_ids.map(|ipi| ipi.prepare(hub));
let (device_guard, mut token) = hub.devices.read(&mut token);
let error = loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(trace::Action::CreateComputePipeline {
id: fid.id(),
desc: desc.clone(),
implicit_context: implicit_context.clone(),
});
}
let pipeline = match device.create_compute_pipeline(
device_id,
desc,
implicit_context,
hub,
&mut token,
) {
Ok(pair) => pair,
Err(e) => break e,
};
let ref_count = pipeline.life_guard.add_ref();
let id = fid.assign(pipeline, &mut token);
log::info!("Created compute pipeline {:?} with {:?}", id, desc);
device
.trackers
.lock()
.compute_pipelines
.insert_single(id, ref_count);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
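/// Compute counterpart of `render_pipeline_get_bind_group_layout`.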
pub fn compute_pipeline_get_bind_group_layout<A: HalApi>(
&self,
pipeline_id: id::ComputePipelineId,
index: u32,
id_in: Input<G, id::BindGroupLayoutId>,
) -> (
id::BindGroupLayoutId,
Option<binding_model::GetBindGroupLayoutError>,
) {
let hub = A::hub(self);
let mut token = Token::root();
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let error = loop {
let (bgl_guard, mut token) = hub.bind_group_layouts.read(&mut token);
let (_, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, _) = hub.compute_pipelines.read(&mut token);
let pipeline = match pipeline_guard.get(pipeline_id) {
Ok(pipeline) => pipeline,
Err(_) => break binding_model::GetBindGroupLayoutError::InvalidPipeline,
};
let id = match pipeline_layout_guard[pipeline.layout_id.value]
.bind_group_layout_ids
.get(index as usize)
{
Some(id) => id,
None => break binding_model::GetBindGroupLayoutError::InvalidGroupIndex(index),
};
bgl_guard[*id].multi_ref_count.inc();
return (id.0, None);
};
let id = hub
.bind_group_layouts
.prepare(id_in)
.assign_error("<derived>", &mut token);
(id, Some(error))
}
pub fn compute_pipeline_label<A: HalApi>(&self, id: id::ComputePipelineId) -> String {
A::hub(self).compute_pipelines.label_for_resource(id)
}
pub fn compute_pipeline_drop<A: HalApi>(&self, compute_pipeline_id: id::ComputePipelineId) {
profiling::scope!("ComputePipeline::drop");
log::debug!("compute pipeline {:?} is dropped", compute_pipeline_id);
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (device_id, layout_id) = {
let (mut pipeline_guard, _) = hub.compute_pipelines.write(&mut token);
match pipeline_guard.get_mut(compute_pipeline_id) {
Ok(pipeline) => {
pipeline.life_guard.ref_count.take();
(pipeline.device_id.value, pipeline.layout_id.clone())
}
Err(InvalidId) => {
hub.compute_pipelines
.unregister_locked(compute_pipeline_id, &mut *pipeline_guard);
return;
}
}
};
let mut life_lock = device_guard[device_id].lock_life(&mut token);
life_lock
.suspected_resources
.compute_pipelines
.push(id::Valid(compute_pipeline_id));
life_lock
.suspected_resources
.pipeline_layouts
.push(layout_id);
}
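/// (Re)configures `surface_id` for presentation from `device_id`,
/// validating the format, view formats, present mode, alpha mode, usage,
/// and size against the surface's capabilities first.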
pub fn surface_configure<A: HalApi>(
&self,
surface_id: id::SurfaceId,
device_id: id::DeviceId,
config: &wgt::SurfaceConfiguration<Vec<TextureFormat>>,
) -> Option<present::ConfigureSurfaceError> {
use hal::{Adapter as _, Surface as _};
use present::ConfigureSurfaceError as E;
profiling::scope!("surface_configure");
fn validate_surface_configuration(
config: &mut hal::SurfaceConfiguration,
caps: &hal::SurfaceCapabilities,
) -> Result<(), E> {
let width = config.extent.width;
let height = config.extent.height;
if width < caps.extents.start().width
|| width > caps.extents.end().width
|| height < caps.extents.start().height
|| height > caps.extents.end().height
{
log::warn!(
"Requested size {}x{} is outside of the supported range: {:?}",
width,
height,
caps.extents
);
}
if !caps.present_modes.contains(&config.present_mode) {
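// The `Auto*` modes are resolved to the first concrete fallback the
// surface supports; an unsupported explicit mode is a hard error.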
let new_mode = 'b: loop {
let fallbacks = match config.present_mode {
wgt::PresentMode::AutoVsync => {
&[wgt::PresentMode::FifoRelaxed, wgt::PresentMode::Fifo][..]
}
wgt::PresentMode::AutoNoVsync => &[
wgt::PresentMode::Immediate,
wgt::PresentMode::Mailbox,
wgt::PresentMode::Fifo,
][..],
_ => {
return Err(E::UnsupportedPresentMode {
requested: config.present_mode,
available: caps.present_modes.clone(),
});
}
};
for &fallback in fallbacks {
if caps.present_modes.contains(&fallback) {
break 'b fallback;
}
}
unreachable!("Fallback system failed to choose present mode. This is a bug. Mode: {:?}, Options: {:?}", config.present_mode, &caps.present_modes);
};
log::info!(
"Automatically choosing presentation mode by rule {:?}. Chose {new_mode:?}",
config.present_mode
);
config.present_mode = new_mode;
}
if !caps.formats.contains(&config.format) {
return Err(E::UnsupportedFormat {
requested: config.format,
available: caps.formats.clone(),
});
}
if !caps
.composite_alpha_modes
.contains(&config.composite_alpha_mode)
{
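// As with present modes, `Auto` falls back to the first supported
// concrete alpha mode, while unsupported explicit modes are errors.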
let new_alpha_mode = 'alpha: loop {
let fallbacks = match config.composite_alpha_mode {
wgt::CompositeAlphaMode::Auto => &[
wgt::CompositeAlphaMode::Opaque,
wgt::CompositeAlphaMode::Inherit,
][..],
_ => {
return Err(E::UnsupportedAlphaMode {
requested: config.composite_alpha_mode,
available: caps.composite_alpha_modes.clone(),
});
}
};
for &fallback in fallbacks {
if caps.composite_alpha_modes.contains(&fallback) {
break 'alpha fallback;
}
}
unreachable!(
"Fallback system failed to choose alpha mode. This is a bug. \
AlphaMode: {:?}, Options: {:?}",
config.composite_alpha_mode, &caps.composite_alpha_modes
);
};
log::info!(
"Automatically choosing alpha mode by rule {:?}. Chose {new_alpha_mode:?}",
config.composite_alpha_mode
);
config.composite_alpha_mode = new_alpha_mode;
}
if !caps.usage.contains(config.usage) {
return Err(E::UnsupportedUsage);
}
if width == 0 || height == 0 {
return Err(E::ZeroArea);
}
Ok(())
}
log::info!("configuring surface with {:?}", config);
let hub = A::hub(self);
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let (device_guard, _token) = hub.devices.read(&mut token);
let error = 'outer: loop {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break DeviceError::Invalid.into(),
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace
.lock()
.add(trace::Action::ConfigureSurface(surface_id, config.clone()));
}
let surface = match surface_guard.get_mut(surface_id) {
Ok(surface) => surface,
Err(_) => break E::InvalidSurface,
};
let caps = unsafe {
let suf = A::get_surface(surface);
let adapter = &adapter_guard[device.adapter_id.value];
match adapter.raw.adapter.surface_capabilities(&suf.unwrap().raw) {
Some(caps) => caps,
None => break E::UnsupportedQueueFamily,
}
};
let mut hal_view_formats = vec![];
for format in config.view_formats.iter() {
if *format == config.format {
continue;
}
if !caps.formats.contains(&config.format) {
break 'outer E::UnsupportedFormat {
requested: config.format,
available: caps.formats.clone(),
};
}
if config.format.remove_srgb_suffix() != format.remove_srgb_suffix() {
break 'outer E::InvalidViewFormat(*format, config.format);
}
hal_view_formats.push(*format);
}
if !hal_view_formats.is_empty() {
if let Err(missing_flag) =
device.require_downlevel_flags(wgt::DownlevelFlags::SURFACE_VIEW_FORMATS)
{
break 'outer E::MissingDownlevelFlags(missing_flag);
}
}
let num_frames = present::DESIRED_NUM_FRAMES
.clamp(*caps.swap_chain_sizes.start(), *caps.swap_chain_sizes.end());
let mut hal_config = hal::SurfaceConfiguration {
swap_chain_size: num_frames,
present_mode: config.present_mode,
composite_alpha_mode: config.alpha_mode,
format: config.format,
extent: wgt::Extent3d {
width: config.width,
height: config.height,
depth_or_array_layers: 1,
},
usage: conv::map_texture_usage(config.usage, hal::FormatAspects::COLOR),
view_formats: hal_view_formats,
};
if let Err(error) = validate_surface_configuration(&mut hal_config, &caps) {
break error;
}
match unsafe {
A::get_surface_mut(surface)
.unwrap()
.raw
.configure(&device.raw, &hal_config)
} {
Ok(()) => (),
Err(error) => {
break match error {
hal::SurfaceError::Outdated | hal::SurfaceError::Lost => E::InvalidSurface,
hal::SurfaceError::Device(error) => E::Device(error.into()),
hal::SurfaceError::Other(message) => {
log::error!("surface configuration failed: {}", message);
E::InvalidSurface
}
}
}
}
if let Some(present) = surface.presentation.take() {
if present.acquired_texture.is_some() {
break E::PreviousOutputExists;
}
}
surface.presentation = Some(present::Presentation {
device_id: Stored {
value: id::Valid(device_id),
ref_count: device.life_guard.add_ref(),
},
config: config.clone(),
num_frames,
acquired_texture: None,
});
return None;
};
Some(error)
}
#[cfg(feature = "replay")]
pub fn device_maintain_ids<A: HalApi>(
&self,
device_id: id::DeviceId,
) -> Result<(), InvalidDevice> {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let device = device_guard.get(device_id).map_err(|_| InvalidDevice)?;
device.lock_life(&mut token).triage_suspected(
hub,
&device.trackers,
#[cfg(feature = "trace")]
None,
&mut token,
);
Ok(())
}
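/// Polls a single device, firing any user callbacks that completed.
/// Returns `true` when the device's queue is empty.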
pub fn device_poll<A: HalApi>(
&self,
device_id: id::DeviceId,
maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
) -> Result<bool, WaitIdleError> {
let (closures, queue_empty) = {
if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
if submission_index.queue_id != device_id {
return Err(WaitIdleError::WrongSubmissionIndex(
submission_index.queue_id,
device_id,
));
}
}
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
device_guard
.get(device_id)
.map_err(|_| DeviceError::Invalid)?
.maintain(hub, maintain, &mut token)?
};
closures.fire();
Ok(queue_empty)
}
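/// Polls all devices of backend `A`, gathering their callbacks into
/// `closures` and queueing devices whose last external reference is gone
/// for teardown via `exit_device`.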
fn poll_devices<A: HalApi>(
&self,
force_wait: bool,
closures: &mut UserClosures,
) -> Result<bool, WaitIdleError> {
profiling::scope!("poll_devices");
let hub = A::hub(self);
let mut devices_to_drop = vec![];
let mut all_queue_empty = true;
{
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
for (id, device) in device_guard.iter(A::VARIANT) {
let maintain = if force_wait {
wgt::Maintain::Wait
} else {
wgt::Maintain::Poll
};
let (cbs, queue_empty) = device.maintain(hub, maintain, &mut token)?;
all_queue_empty = all_queue_empty && queue_empty;
if queue_empty && device.ref_count.load() == 1 {
devices_to_drop.push(id);
}
closures.extend(cbs);
}
}
for device_id in devices_to_drop {
self.exit_device::<A>(device_id);
}
Ok(all_queue_empty)
}
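/// Polls the devices of every compiled backend. Returns `true` only if
/// every queue on every backend is empty.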
pub fn poll_all_devices(&self, force_wait: bool) -> Result<bool, WaitIdleError> {
let mut closures = UserClosures::default();
let mut all_queue_empty = true;
#[cfg(all(feature = "vulkan", not(target_arch = "wasm32")))]
{
all_queue_empty = self.poll_devices::<hal::api::Vulkan>(force_wait, &mut closures)?
&& all_queue_empty;
}
#[cfg(all(feature = "metal", any(target_os = "macos", target_os = "ios")))]
{
all_queue_empty =
self.poll_devices::<hal::api::Metal>(force_wait, &mut closures)? && all_queue_empty;
}
#[cfg(all(feature = "dx12", windows))]
{
all_queue_empty =
self.poll_devices::<hal::api::Dx12>(force_wait, &mut closures)? && all_queue_empty;
}
#[cfg(all(feature = "dx11", windows))]
{
all_queue_empty =
self.poll_devices::<hal::api::Dx11>(force_wait, &mut closures)? && all_queue_empty;
}
#[cfg(feature = "gles")]
{
all_queue_empty =
self.poll_devices::<hal::api::Gles>(force_wait, &mut closures)? && all_queue_empty;
}
closures.fire();
Ok(all_queue_empty)
}
pub fn device_label<A: HalApi>(&self, id: id::DeviceId) -> String {
A::hub(self).devices.label_for_resource(id)
}
pub fn device_start_capture<A: HalApi>(&self, id: id::DeviceId) {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
if let Ok(device) = device_guard.get(id) {
unsafe { device.raw.start_capture() };
}
}
pub fn device_stop_capture<A: HalApi>(&self, id: id::DeviceId) {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
if let Ok(device) = device_guard.get(id) {
unsafe { device.raw.stop_capture() };
}
}
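/// Drops the external reference to the device. Actual teardown is
/// deferred until `poll_devices` observes an idle, unreferenced device
/// and calls `exit_device`.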
pub fn device_drop<A: HalApi>(&self, device_id: id::DeviceId) {
profiling::scope!("Device::drop");
log::debug!("device {:?} is dropped", device_id);
let hub = A::hub(self);
let mut token = Token::root();
let (mut device_guard, _) = hub.devices.write(&mut token);
if let Ok(device) = device_guard.get_mut(device_id) {
device.life_guard.ref_count.take().unwrap();
}
}
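/// Tears down an idle, unreferenced device, also unregistering its
/// adapter when the device held the adapter's last reference.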
fn exit_device<A: HalApi>(&self, device_id: id::DeviceId) {
let hub = A::hub(self);
let mut token = Token::root();
let mut free_adapter_id = None;
{
let (device, mut _token) = hub.devices.unregister(device_id, &mut token);
if let Some(mut device) = device {
debug_assert!(device.lock_life(&mut _token).queue_empty());
device.pending_writes.deactivate();
if device.adapter_id.ref_count.load() == 1 {
free_adapter_id = Some(device.adapter_id.value.0);
}
device.dispose();
}
}
if let Some(free_adapter_id) = free_adapter_id {
let _ = hub.adapters.unregister(free_adapter_id, &mut token);
}
}
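/// Starts an asynchronous mapping of `range`. If validation fails, the
/// callback in `op` is fired immediately with the error, which is also
/// returned to the caller.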
pub fn buffer_map_async<A: HalApi>(
&self,
buffer_id: id::BufferId,
range: Range<BufferAddress>,
op: BufferMapOperation,
) -> BufferAccessResult {
if let Err((op, err)) = self.buffer_map_async_inner::<A>(buffer_id, range, op) {
op.callback.call(Err(err.clone()));
return Err(err);
}
Ok(())
}
fn buffer_map_async_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
range: Range<BufferAddress>,
op: BufferMapOperation,
) -> Result<(), (BufferMapOperation, BufferAccessError)> {
profiling::scope!("Buffer::map_async");
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (pub_usage, internal_use) = match op.host {
HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ),
HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
};
if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedRange));
}
let (device_id, ref_count) = {
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| BufferAccessError::Invalid);
let buffer = match buffer {
Ok(b) => b,
Err(e) => {
return Err((op, e));
}
};
if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) {
return Err((op, e.into()));
}
if range.start > range.end {
return Err((
op,
BufferAccessError::NegativeRange {
start: range.start,
end: range.end,
},
));
}
if range.end > buffer.size {
return Err((
op,
BufferAccessError::OutOfBoundsOverrun {
index: range.end,
max: buffer.size,
},
));
}
buffer.map_state = match buffer.map_state {
resource::BufferMapState::Init { .. } | resource::BufferMapState::Active { .. } => {
return Err((op, BufferAccessError::AlreadyMapped));
}
resource::BufferMapState::Waiting(_) => {
return Err((op, BufferAccessError::MapAlreadyPending));
}
resource::BufferMapState::Idle => {
resource::BufferMapState::Waiting(resource::BufferPendingMapping {
range,
op,
_parent_ref_count: buffer.life_guard.add_ref(),
})
}
};
log::debug!("Buffer {:?} map state -> Waiting", buffer_id);
let device = &device_guard[buffer.device_id.value];
let ret = (buffer.device_id.value, buffer.life_guard.add_ref());
let mut trackers = device.trackers.lock();
trackers
.buffers
.set_single(&*buffer_guard, buffer_id, internal_use);
trackers.buffers.drain();
ret
};
let device = &device_guard[device_id];
device
.lock_life(&mut token)
.map(id::Valid(buffer_id), ref_count);
Ok(())
}
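/// Returns a pointer and length for the mapped region starting at
/// `offset`. A `size` of `None` extends the range to the end of the
/// buffer; alignment and bounds are checked against the active mapping.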
pub fn buffer_get_mapped_range<A: HalApi>(
&self,
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferAddress>,
) -> Result<(*mut u8, u64), BufferAccessError> {
profiling::scope!("Buffer::get_mapped_range");
let hub = A::hub(self);
let mut token = Token::root();
let (buffer_guard, _) = hub.buffers.read(&mut token);
let buffer = buffer_guard
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
let range_size = if let Some(size) = size {
size
} else if offset > buffer.size {
0
} else {
buffer.size - offset
};
if offset % wgt::MAP_ALIGNMENT != 0 {
return Err(BufferAccessError::UnalignedOffset { offset });
}
if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(BufferAccessError::UnalignedRangeSize { range_size });
}
match buffer.map_state {
resource::BufferMapState::Init { ptr, .. } => {
if offset + range_size > buffer.size {
return Err(BufferAccessError::OutOfBoundsOverrun {
index: offset + range_size - 1,
max: buffer.size,
});
}
unsafe { Ok((ptr.as_ptr().offset(offset as isize), range_size)) }
}
resource::BufferMapState::Active { ptr, ref range, .. } => {
if offset < range.start {
return Err(BufferAccessError::OutOfBoundsUnderrun {
index: offset,
min: range.start,
});
}
if offset + range_size > range.end {
return Err(BufferAccessError::OutOfBoundsOverrun {
index: offset + range_size - 1,
max: range.end,
});
}
let relative_offset = (offset - range.start) as isize;
unsafe { Ok((ptr.as_ptr().offset(relative_offset), range_size)) }
}
resource::BufferMapState::Idle | resource::BufferMapState::Waiting(_) => {
Err(BufferAccessError::NotMapped)
}
}
}
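/// Resets the buffer's map state to `Idle`, returning a pending-map
/// callback to fire with `MapAborted` if a map request was still waiting.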
fn buffer_unmap_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
buffer: &mut resource::Buffer<A>,
device: &mut Device<A>,
) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
log::debug!("Buffer {:?} map state -> Idle", buffer_id);
match mem::replace(&mut buffer.map_state, resource::BufferMapState::Idle) {
resource::BufferMapState::Init {
ptr,
stage_buffer,
needs_flush,
} => {
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data = trace.make_binary("bin", unsafe {
std::slice::from_raw_parts(ptr.as_ptr(), buffer.size as usize)
});
trace.add(trace::Action::WriteBuffer {
id: buffer_id,
data,
range: 0..buffer.size,
queued: true,
});
}
let _ = ptr;
if needs_flush {
unsafe {
device
.raw
.flush_mapped_ranges(&stage_buffer, iter::once(0..buffer.size));
}
}
let raw_buf = buffer.raw.as_ref().ok_or(BufferAccessError::Destroyed)?;
buffer.life_guard.use_at(device.active_submission_index + 1);
let region = wgt::BufferSize::new(buffer.size).map(|size| hal::BufferCopy {
src_offset: 0,
dst_offset: 0,
size,
});
let transition_src = hal::BufferBarrier {
buffer: &stage_buffer,
usage: hal::BufferUses::MAP_WRITE..hal::BufferUses::COPY_SRC,
};
let transition_dst = hal::BufferBarrier {
buffer: raw_buf,
usage: hal::BufferUses::empty()..hal::BufferUses::COPY_DST,
};
let encoder = device.pending_writes.activate();
unsafe {
encoder.transition_buffers(
iter::once(transition_src).chain(iter::once(transition_dst)),
);
if buffer.size > 0 {
encoder.copy_buffer_to_buffer(&stage_buffer, raw_buf, region.into_iter());
}
}
device
.pending_writes
.consume_temp(queue::TempResource::Buffer(stage_buffer));
device.pending_writes.dst_buffers.insert(buffer_id);
}
resource::BufferMapState::Idle => {
return Err(BufferAccessError::NotMapped);
}
resource::BufferMapState::Waiting(pending) => {
return Ok(Some((pending.op, Err(BufferAccessError::MapAborted))));
}
resource::BufferMapState::Active { ptr, range, host } => {
if host == HostMap::Write {
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let size = range.end - range.start;
let data = trace.make_binary("bin", unsafe {
std::slice::from_raw_parts(ptr.as_ptr(), size as usize)
});
trace.add(trace::Action::WriteBuffer {
id: buffer_id,
data,
range: range.clone(),
queued: false,
});
}
let _ = (ptr, range);
}
unsafe {
device
.raw
.unmap_buffer(buffer.raw.as_ref().unwrap())
.map_err(DeviceError::from)?
};
}
}
Ok(None)
}
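/// Unmaps the buffer, firing any aborted-map callback after the hub locks
/// have been released.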
pub fn buffer_unmap<A: HalApi>(&self, buffer_id: id::BufferId) -> BufferAccessResult {
profiling::scope!("unmap", "Buffer");
let closure;
{
let hub = A::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let buffer = buffer_guard
.get_mut(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
let device = &mut device_guard[buffer.device_id.value];
closure = self.buffer_unmap_inner(buffer_id, buffer, device)
}
if let Some((operation, status)) = closure? {
operation.callback.call(status);
}
Ok(())
}
}