wgpu_hal/gles/device.rs

use alloc::{
    borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};

use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;

struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    push_constant_items: &'a mut Vec<naga::back::glsl::PushConstantItem>,
    multiview: Option<NonZeroU32>,
}

impl CompilationContext<'_> {
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
            }
        }

        *self.push_constant_items = reflection_info.push_constant_items;
    }
}

impl super::Device {
    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a texture
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the texture. If
    ///   `drop_callback` is [`Some`], the texture must be valid until the callback is called.
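    ///
    /// A minimal usage sketch (illustrative only, not a compiled doctest; the descriptor
    /// construction and GL storage allocation are assumed to happen elsewhere):
    ///
    /// ```ignore
    /// // `gl` is the glow::Context, `device` the gles Device, and `desc` a TextureDescriptor
    /// // that matches how the texture below is allocated.
    /// let raw = unsafe { gl.create_texture() }.unwrap();
    /// // ... allocate storage for `raw` so that it respects `desc` ...
    /// let texture = unsafe { device.texture_from_raw(raw.0, &desc, None) };
    /// // `drop_callback` is `None`, so wgpu-hal now owns and will delete the texture.
    /// ```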
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a renderbuffer
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the renderbuffer. If
    ///   `drop_callback` is [`Some`], the renderbuffer must be valid until the callback is called.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {:?}", raw);

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::warn!("\tCompile: {}", msg);
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {}", msg);
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_owned(),
            multiview: context.multiview,
        };

        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.naga.module,
            &stage.module.naga.info,
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        // The image bounds checks require the TEXTURE_LEVELS feature available in GL core 4.3+.
        let version = gl.version();
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        // Other bounds checks are either provided by glsl or not implemented yet.
        let policies = naga::proc::BoundsCheckPolicies {
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
            // We use a conditional here, as cloning the naga_options could be expensive
            // That is, we want to avoid doing that unless we cannot avoid it
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{}", output);

        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        // This guard ensures that we can't accidentally destroy a program whilst we're about to reuse it
        // The only place that destroys a pipeline is also locking on `program_cache`
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut push_constant_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                push_constant_items.push(Vec::new());
                push_constant_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                push_constant_items: pc_item,
                multiview,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // Create empty fragment shader if only vertex shader is present
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}",);
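            // For example, with a GLES 3.1 context `glsl_version` is "310 es" above, so this
            // expands to "#version 310 es\n void main(void) {}".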
            log::info!("Only vertex shader is present. Creating an empty fragment shader",);
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {:?}", program);

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::warn!("\tLink: {}", msg);
        }

        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // This remapping is only needed if we aren't able to put the binding layout
            // in the shader. We can't remap storage buffers this way.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {:?} from program {:?}", name, program);
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!(
                            "Unable to re-map shader storage block {} to {}",
                            name,
                            index
                        );
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        for (stage_idx, stage_items) in push_constant_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.naga.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "push constant item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::PushConstantDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            // If this returns none (the uniform isn't active), that's fine, we just won't set it.
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            push_constant_descs: uniforms,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(PrivateCapabilities::BUFFER_ALLOCATION);

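        // When mapping has to be emulated, a MAP_WRITE buffer is backed purely by the CPU-side
        // `data` allocation below and no GL buffer object is created (`raw: None`).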
        if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
                offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
            });
        }

        let gl = &self.shared.context.lock();

        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let is_host_visible = desc
            .usage
            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(wgt::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
            // TODO: may also be required for other calls involving `buffer_sub_data_u8_slice` (e.g. copy buffer to buffer and clear buffer)
            if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(wgt::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
                // Even if the usage doesn't contain SRC_READ, we update it internally at least once
                // Some vendors take usage very literally and STATIC_DRAW will freeze us with an empty buffer
                // https://github.com/gfx-rs/wgpu/issues/3371
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        //TODO: do we need `glow::MAP_UNSYNCHRONIZED_BIT`?

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.map_or(0, |buf| buf.0.get());
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
            Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        self.counters.buffers.add(1);

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
            offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
        })
    }

    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }

        self.counters.buffers.sub(1);
    }

    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }

    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
                *lock(&buffer.offset_of_current_mapping) = 0;
            }
        }
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
        //TODO: do we need to do anything?
    }

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ;
        let format_desc = self.shared.describe_texture_format(desc.format);

        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
            //Note: this has to be done before defining the storage!
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    // reset default filtering mode
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }

    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
            }
        }

        // For clarity, we explicitly drop the drop guard. Although this has no real semantic effect as the
        // end of the scope will drop the drop guard since this function takes ownership of the texture.
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }

    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            //TODO: use `conv::map_view_dimension(desc.dimension)`?
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }

    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        // If clamp is not 1, we know anisotropy is supported up to 16x
        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0);

        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }

    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Queue>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        self.counters.command_encoders.add(1);

        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
            counters: Arc::clone(&self.counters),
        })
    }

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        self.counters.bind_group_layouts.add(1);
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }

    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
        // We always force point size to be written and it will be ignored by the driver if it's not a point list primitive.
        // https://github.com/gfx-rs/wgpu/pull/3440/files#r1095726950
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

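        // Bindings of each kind get consecutive linear "slots" across all groups, in
        // declaration order. For example, with group 0 = { binding 0: uniform buffer,
        // binding 1: texture } and group 1 = { binding 0: texture }, the uniform buffer
        // lands in uniform-buffer slot 0 and the two textures in texture slots 0 and 1.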
        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            // create a vector large enough to hold all the bindings, filled with `!0`
1170            let mut binding_to_slot = vec![
1171                !0;
1172                bg_layout
1173                    .entries
1174                    .iter()
1175                    .map(|b| b.binding)
1176                    .max()
1177                    .map_or(0, |idx| idx as usize + 1)
1178            ]
1179            .into_boxed_slice();
1180
1181            for entry in bg_layout.entries.iter() {
1182                let counter = match entry.ty {
1183                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
1184                    wgt::BindingType::Texture { .. } => &mut num_textures,
1185                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
1186                    wgt::BindingType::Buffer {
1187                        ty: wgt::BufferBindingType::Uniform,
1188                        ..
1189                    } => &mut num_uniform_buffers,
1190                    wgt::BindingType::Buffer {
1191                        ty: wgt::BufferBindingType::Storage { .. },
1192                        ..
1193                    } => &mut num_storage_buffers,
1194                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
1195                };
1196
1197                binding_to_slot[entry.binding as usize] = *counter;
1198                let br = naga::ResourceBinding {
1199                    group: group_index as u32,
1200                    binding: entry.binding,
1201                };
1202                binding_map.insert(br, *counter);
1203                *counter += entry.count.map_or(1, |c| c.get() as u8);
1204            }
1205
1206            group_infos.push(super::BindGroupLayoutInfo {
1207                entries: Arc::clone(&bg_layout.entries),
1208                binding_to_slot,
1209            });
1210        }
1211
1212        self.counters.pipeline_layouts.add(1);
1213
1214        Ok(super::PipelineLayout {
1215            group_infos: group_infos.into_boxed_slice(),
1216            naga_options: glsl::Options {
1217                version: self.shared.shading_language_version,
1218                writer_flags,
1219                binding_map,
1220                zero_initialize_workgroup_memory: true,
1221            },
1222        })
1223    }
1224
1225    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
1226        self.counters.pipeline_layouts.sub(1);
1227    }
1228
1229    unsafe fn create_bind_group(
1230        &self,
1231        desc: &crate::BindGroupDescriptor<
1232            super::BindGroupLayout,
1233            super::Buffer,
1234            super::Sampler,
1235            super::TextureView,
1236            super::AccelerationStructure,
1237        >,
1238    ) -> Result<super::BindGroup, crate::DeviceError> {
1239        let mut contents = Vec::new();
1240
1241        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
1242            let layout = desc
1243                .layout
1244                .entries
1245                .iter()
1246                .find(|layout_entry| layout_entry.binding == entry.binding)
1247                .expect("internal error: no layout entry found with binding slot");
1248            (entry, layout)
1249        });
1250        for (entry, layout) in layout_and_entry_iter {
1251            let binding = match layout.ty {
1252                wgt::BindingType::Buffer { .. } => {
1253                    let bb = &desc.buffers[entry.resource_index as usize];
1254                    super::RawBinding::Buffer {
1255                        raw: bb.buffer.raw.unwrap(),
1256                        offset: bb.offset as i32,
1257                        size: match bb.size {
1258                            Some(s) => s.get() as i32,
1259                            None => (bb.buffer.size - bb.offset) as i32,
1260                        },
1261                    }
1262                }
1263                wgt::BindingType::Sampler { .. } => {
1264                    let sampler = desc.samplers[entry.resource_index as usize];
1265                    super::RawBinding::Sampler(sampler.raw)
1266                }
1267                wgt::BindingType::Texture { view_dimension, .. } => {
1268                    let view = desc.textures[entry.resource_index as usize].view;
1269                    if view.array_layers.start != 0 {
1270                        log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
1271                            "This is an implementation problem of wgpu-hal/gles backend.")
1272                    }
1273                    let (raw, target) = view.inner.as_native();
1274
1275                    super::Texture::log_failing_target_heuristics(view_dimension, target);
1276
1277                    super::RawBinding::Texture {
1278                        raw,
1279                        target,
1280                        aspects: view.aspects,
1281                        mip_levels: view.mip_levels.clone(),
1282                    }
1283                }
1284                wgt::BindingType::StorageTexture {
1285                    access,
1286                    format,
1287                    view_dimension,
1288                } => {
1289                    let view = desc.textures[entry.resource_index as usize].view;
1290                    let format_desc = self.shared.describe_texture_format(format);
1291                    let (raw, _target) = view.inner.as_native();
1292                    super::RawBinding::Image(super::ImageBinding {
1293                        raw,
1294                        mip_level: view.mip_levels.start,
1295                        array_layer: match view_dimension {
1296                            wgt::TextureViewDimension::D2Array
1297                            | wgt::TextureViewDimension::CubeArray => None,
1298                            _ => Some(view.array_layers.start),
1299                        },
1300                        access: conv::map_storage_access(access),
1301                        format: format_desc.internal,
1302                    })
1303                }
1304                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
1305            };
1306            contents.push(binding);
1307        }
1308
1309        self.counters.bind_groups.add(1);
1310
1311        Ok(super::BindGroup {
1312            contents: contents.into_boxed_slice(),
1313        })
1314    }
1315
1316    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
1317        self.counters.bind_groups.sub(1);
1318    }
1319
1320    unsafe fn create_shader_module(
1321        &self,
1322        desc: &crate::ShaderModuleDescriptor,
1323        shader: crate::ShaderInput,
1324    ) -> Result<super::ShaderModule, crate::ShaderError> {
1325        self.counters.shader_modules.add(1);
1326
1327        Ok(super::ShaderModule {
1328            naga: match shader {
1329                crate::ShaderInput::SpirV(_) => {
1330                    panic!("`Features::SPIRV_SHADER_PASSTHROUGH` is not enabled")
1331                }
1332                crate::ShaderInput::Msl { .. } => {
1333                    panic!("`Features::MSL_SHADER_PASSTHROUGH` is not enabled")
1334                }
1335                crate::ShaderInput::Naga(naga) => naga,
1336            },
1337            label: desc.label.map(|str| str.to_string()),
1338            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
1339        })
1340    }
1341
1342    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
1343        self.counters.shader_modules.sub(1);
1344    }
1345
    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, &desc.vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner =
            unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?;

        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in desc.vertex_buffers.iter().enumerate() {
                buffers.push(super::VertexBufferDesc {
                    step: vb_layout.step_mode,
                    stride: vb_layout.array_stride as u32,
                });
                for vat in vb_layout.attributes.iter() {
                    let format_desc = conv::describe_vertex_format(vat.format);
                    attributes.push(super::AttributeDesc {
                        location: vat.shader_location,
                        offset: vat.offset as u32,
                        buffer_index: index as u32,
                        format_desc,
                    });
                }
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            // Note: if any of the color target states differ and the `INDEPENDENT_BLEND`
            // downlevel flag is not exposed, this pipeline will not bind correctly.
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }
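
    // Added note: mesh pipelines are not supported by the GLES backend; the mesh-shader
    // feature is not expected to be exposed here, so this entry point should never be
    // reached.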
    unsafe fn create_mesh_pipeline(
        &self,
        _desc: &crate::MeshPipelineDescriptor<
            <Self::A as crate::Api>::PipelineLayout,
            <Self::A as crate::Api>::ShaderModule,
            <Self::A as crate::Api>::PipelineCache,
        >,
    ) -> Result<<Self::A as crate::Api>::RenderPipeline, crate::PipelineError> {
        unreachable!()
    }

    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`.
        // This is safe to assume as long as:
        // - `RenderPipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
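            // Added note: evict this program's cache entry as well, so the soon-to-be
            // deleted GL program handle is not handed out again for a later pipeline
            // built from the same shaders.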
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }

    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`.
        // This is safe to assume as long as:
        // - `ComputePipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }

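    // Added note: pipeline caching is effectively a no-op on this backend; compiled GL
    // programs are instead reused through the device's shared `program_cache` (see the
    // pipeline destroy paths above).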
    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
        // Even though the cache doesn't do anything, we still return something here
        // as the least bad option
        Ok(super::PipelineCache)
    }
    unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

            // We aren't really able to, in general, label queries.
            //
            // We could take a timestamp here to "initialize" the query,
            // but that's a bit of a hack, and we don't want to insert
            // random timestamps into the command stream if we don't have to.

            queries.push(query);
        }

        self.counters.query_sets.add(1);

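        // Added note: WebGPU occlusion queries only need a boolean "any samples passed"
        // answer, so the conservative GL query target should be sufficient here;
        // timestamp queries map to `glow::TIMESTAMP`.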
        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }

    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }

    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }

    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }

    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
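
    // Added note: waiting first checks whether the requested fence value is already
    // satisfied without touching the GL context; only then does it issue a client wait,
    // whose timeout is forced to zero on WebGL because browsers cap (or reject)
    // non-zero client-side waits, as the links below describe.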
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout_ms: u32,
    ) -> Result<bool, crate::DeviceError> {
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
        // MAX_CLIENT_WAIT_TIMEOUT_WEBGL is:
        // - 1s in Gecko https://searchfox.org/mozilla-central/rev/754074e05178e017ef6c3d8e30428ffa8f1b794d/dom/canvas/WebGLTypes.h#1386
        // - 0 in WebKit https://github.com/WebKit/WebKit/blob/4ef90d4672ca50267c0971b85db403d9684508ea/Source/WebCore/html/canvas/WebGL2RenderingContext.cpp#L110
        // - 0 in Chromium https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/modules/webgl/webgl2_rendering_context_base.cc;l=112;drc=a3cb0ac4c71ec04abfeaed199e5d63230eca2551
        let timeout_ns = if cfg!(any(webgl, Emscripten)) {
            0
        } else {
            (timeout_ms as u64 * 1_000_000).min(!0u32 as u64)
        };
        fence.wait(gl, wait_value, timeout_ns)
    }

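    // Added note: graphics debugger capture is only wired up to RenderDoc, and only on
    // native builds with the `renderdoc` feature; on every other configuration starting
    // a capture reports `false`.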
    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        #[allow(unreachable_code)]
        false
    }
    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
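
    // Added note: ray-tracing acceleration structures are not supported by the GLES
    // backend, so these entry points are left unimplemented.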
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }

    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }

    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }
}

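// Added note: these impls claim the device may be shared across threads; they are only
// compiled in when the `send_sync` cfg is set, i.e. when the build is configured such
// that the underlying GL context wrapper is itself safe to send and share.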
#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}