wit_bindgen_core/
abi.rs

1pub use wit_parser::abi::{AbiVariant, WasmSignature, WasmType};
2use wit_parser::{
3    ElementInfo, Enum, Flags, FlagsRepr, Function, Handle, Int, Record, Resolve, Result_,
4    SizeAlign, Tuple, Type, TypeDefKind, TypeId, Variant,
5};
6
7// Helper macro for defining instructions without having to write tons of
8// exhaustive `match` statements that need updating whenever a variant changes.
9macro_rules! def_instruction {
10    (
11        $( #[$enum_attr:meta] )*
12        pub enum $name:ident<'a> {
13            $(
14                $( #[$attr:meta] )*
15                $variant:ident $( {
16                    $($field:ident : $field_ty:ty $(,)* )*
17                } )?
18                    :
19                [$num_popped:expr] => [$num_pushed:expr],
20            )*
21        }
22    ) => {
23        $( #[$enum_attr] )*
24        pub enum $name<'a> {
25            $(
26                $( #[$attr] )*
27                $variant $( {
28                    $(
29                        $field : $field_ty,
30                    )*
31                } )? ,
32            )*
33        }
34
35        impl $name<'_> {
36            /// How many operands does this instruction pop from the stack?
37            #[allow(unused_variables)]
38            pub fn operands_len(&self) -> usize {
39                match self {
40                    $(
41                        Self::$variant $( {
42                            $(
43                                $field,
44                            )*
45                        } )? => $num_popped,
46                    )*
47                }
48            }
49
50            /// How many results does this instruction push onto the stack?
51            #[allow(unused_variables)]
52            pub fn results_len(&self) -> usize {
53                match self {
54                    $(
55                        Self::$variant $( {
56                            $(
57                                $field,
58                            )*
59                        } )? => $num_pushed,
60                    )*
61                }
62            }
63        }
64    };
65}
66
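// As an illustration of the notation used below: an instruction declared as
//
//     I32Store { offset: i32 } : [2] => [0],
//
// pops two operands from the stack (the value and the destination pointer)
// and pushes none, so its `operands_len()` is 2 and its `results_len()` is 0.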
67def_instruction! {
68    #[derive(Debug)]
69    pub enum Instruction<'a> {
70        /// Acquires the specified parameter and places it on the stack.
71        /// Depending on the context this may refer to wasm parameters or
72        /// interface types parameters.
73        GetArg { nth: usize } : [0] => [1],
74
75        // Integer const/manipulation instructions
76
77        /// Pushes the constant `val` onto the stack.
78        I32Const { val: i32 } : [0] => [1],
79        /// Casts the top `casts.len()` items on the stack using the `Bitcast`s
80        /// provided, consuming the same number of operands as it produces.
81        Bitcasts { casts: &'a [Bitcast] } : [casts.len()] => [casts.len()],
82        /// Pushes one constant zero onto the stack for each wasm type in `tys`.
83        ConstZero { tys: &'a [WasmType] } : [0] => [tys.len()],
84
85        // Memory load/store instructions
86
87        /// Pops a pointer from the stack and loads a little-endian `i32` from
88        /// it, using the specified constant offset.
89        I32Load { offset: i32 } : [1] => [1],
90        /// Pops a pointer from the stack and loads a little-endian `i8` from
91        /// it, using the specified constant offset. The value loaded is then
92        /// zero-extended to 32 bits.
93        I32Load8U { offset: i32 } : [1] => [1],
94        /// Pops a pointer from the stack and loads a little-endian `i8` from
95        /// it, using the specified constant offset. The value loaded is then
96        /// sign-extended to 32 bits.
97        I32Load8S { offset: i32 } : [1] => [1],
98        /// Pops a pointer from the stack and loads a little-endian `i16` from
99        /// it, using the specified constant offset. The value loaded is then
100        /// zero-extended to 32 bits.
101        I32Load16U { offset: i32 } : [1] => [1],
102        /// Pops a pointer from the stack and loads a little-endian `i16` from
103        /// it, using the specified constant offset. The value loaded is then
104        /// sign-extended to 32 bits.
105        I32Load16S { offset: i32 } : [1] => [1],
106        /// Pops a pointer from the stack and loads a little-endian `i64` from
107        /// it, using the specified constant offset.
108        I64Load { offset: i32 } : [1] => [1],
109        /// Pops a pointer from the stack and loads a little-endian `f32` from
110        /// it, using the specified constant offset.
111        F32Load { offset: i32 } : [1] => [1],
112        /// Pops a pointer from the stack and loads a little-endian `f64` from
113        /// it, using the specified constant offset.
114        F64Load { offset: i32 } : [1] => [1],
115
116        /// Like `I32Load` or `I64Load`, but for loading pointer values.
117        PointerLoad { offset: i32 } : [1] => [1],
118        /// Like `I32Load` or `I64Load`, but for loading array length values.
119        LengthLoad { offset: i32 } : [1] => [1],
120
121        /// Pops a pointer from the stack and then an `i32` value.
122        /// Stores the value in little-endian at the pointer specified plus the
123        /// constant `offset`.
124        I32Store { offset: i32 } : [2] => [0],
125        /// Pops a pointer from the stack and then an `i32` value.
126        /// Stores the low 8 bits of the value in little-endian at the pointer
127        /// specified plus the constant `offset`.
128        I32Store8 { offset: i32 } : [2] => [0],
129        /// Pops a pointer from the stack and then an `i32` value.
130        /// Stores the low 16 bits of the value in little-endian at the pointer
131        /// specified plus the constant `offset`.
132        I32Store16 { offset: i32 } : [2] => [0],
133        /// Pops a pointer from the stack and then an `i64` value.
134        /// Stores the value in little-endian at the pointer specified plus the
135        /// constant `offset`.
136        I64Store { offset: i32 } : [2] => [0],
137        /// Pops a pointer from the stack and then an `f32` value.
138        /// Stores the value in little-endian at the pointer specified plus the
139        /// constant `offset`.
140        F32Store { offset: i32 } : [2] => [0],
141        /// Pops a pointer from the stack and then an `f64` value.
142        /// Stores the value in little-endian at the pointer specified plus the
143        /// constant `offset`.
144        F64Store { offset: i32 } : [2] => [0],
145
146        /// Like `I32Store` or `I64Store`, but for storing pointer values.
147        PointerStore { offset: i32 } : [2] => [0],
148        /// Like `I32Store` or `I64Store`, but for storing array length values.
149        LengthStore { offset: i32 } : [2] => [0],
150
151        // Scalar lifting/lowering
152
153        /// Converts an interface type `char` value to a 32-bit integer
154        /// representing the unicode scalar value.
155        I32FromChar : [1] => [1],
156        /// Converts an interface type `u64` value to a wasm `i64`.
157        I64FromU64 : [1] => [1],
158        /// Converts an interface type `s64` value to a wasm `i64`.
159        I64FromS64 : [1] => [1],
160        /// Converts an interface type `u32` value to a wasm `i32`.
161        I32FromU32 : [1] => [1],
162        /// Converts an interface type `s32` value to a wasm `i32`.
163        I32FromS32 : [1] => [1],
164        /// Converts an interface type `u16` value to a wasm `i32`.
165        I32FromU16 : [1] => [1],
166        /// Converts an interface type `s16` value to a wasm `i32`.
167        I32FromS16 : [1] => [1],
168        /// Converts an interface type `u8` value to a wasm `i32`.
169        I32FromU8 : [1] => [1],
170        /// Converts an interface type `s8` value to a wasm `i32`.
171        I32FromS8 : [1] => [1],
172        /// Converts an interface type `f32` value to a wasm `f32`.
173        ///
174        /// This may be a noop for some implementations, but it's here in case the
175        /// native language representation of `f32` is different than the wasm
176        /// representation of `f32`.
177        CoreF32FromF32 : [1] => [1],
178        /// Converts an interface type `f64` value to a wasm `f64`.
179        ///
180        /// This may be a noop for some implementations, but it's here in case the
181        /// native language representation of `f64` is different than the wasm
182        /// representation of `f64`.
183        CoreF64FromF64 : [1] => [1],
184
185        /// Converts a native wasm `i32` to an interface type `s8`.
186        ///
187        /// This will truncate the upper bits of the `i32`.
188        S8FromI32 : [1] => [1],
189        /// Converts a native wasm `i32` to an interface type `u8`.
190        ///
191        /// This will truncate the upper bits of the `i32`.
192        U8FromI32 : [1] => [1],
193        /// Converts a native wasm `i32` to an interface type `s16`.
194        ///
195        /// This will truncate the upper bits of the `i32`.
196        S16FromI32 : [1] => [1],
197        /// Converts a native wasm `i32` to an interface type `u16`.
198        ///
199        /// This will truncate the upper bits of the `i32`.
200        U16FromI32 : [1] => [1],
201        /// Converts a native wasm `i32` to an interface type `s32`.
202        S32FromI32 : [1] => [1],
203        /// Converts a native wasm `i32` to an interface type `u32`.
204        U32FromI32 : [1] => [1],
205        /// Converts a native wasm `i64` to an interface type `s64`.
206        S64FromI64 : [1] => [1],
207        /// Converts a native wasm `i64` to an interface type `u64`.
208        U64FromI64 : [1] => [1],
209        /// Converts a native wasm `i32` to an interface type `char`.
210        ///
211        /// It's safe to assume that the `i32` is indeed a valid unicode code point.
212        CharFromI32 : [1] => [1],
213        /// Converts a native wasm `f32` to an interface type `f32`.
214        F32FromCoreF32 : [1] => [1],
215        /// Converts a native wasm `f64` to an interface type `f64`.
216        F64FromCoreF64 : [1] => [1],
217
218        /// Creates a `bool` from an `i32` input, trapping if the `i32` isn't
219        /// zero or one.
220        BoolFromI32 : [1] => [1],
221        /// Creates an `i32` from a `bool` input; the result must be 0 or 1.
222        I32FromBool : [1] => [1],
223
224        // lists
225
226        /// Lowers a list where the element's layout in the native language is
227        /// expected to match the canonical ABI definition of interface types.
228        ///
229        /// Pops a list value from the stack and pushes the pointer/length onto
230        /// the stack. If `realloc` is set to `Some` then this is expected to
231        /// *consume* the list which means that the data needs to be copied. An
232        /// allocation/copy is expected when:
233        ///
234        /// * A host is calling a wasm export with a list (it needs to copy the
235        ///   list in to the callee's module, allocating space with `realloc`)
236        /// * A wasm export is returning a list (it's expected to use `realloc`
237        ///   to give ownership of the list to the caller).
238        /// * A host is returning a list in an import definition, meaning that
239        ///   space needs to be allocated in the caller with `realloc`.
240        ///
241        /// A copy does not happen (e.g. `realloc` is `None`) when:
242        ///
243        /// * A wasm module calls an import with the list. In this situation
244        ///   it's expected the callee will know how to access this module's
245        ///   memory (e.g. the host has raw access, or wasm-to-wasm communication
246        ///   would copy the list).
247        ///
248        /// If `realloc` is `Some` then the adapter is not responsible for
249        /// cleaning up this list because the other end is receiving the
250        /// allocation. If `realloc` is `None` then the adapter is responsible
251        /// for cleaning up any temporary allocation it created, if any.
252        ListCanonLower {
253            element: &'a Type,
254            realloc: Option<&'a str>,
255        } : [1] => [2],
256
257        /// Same as `ListCanonLower`, but used for strings
258        StringLower {
259            realloc: Option<&'a str>,
260        } : [1] => [2],
261
262        /// Lowers a list where the element's layout in the native language is
263        /// not expected to match the canonical ABI definition of interface
264        /// types.
265        ///
266        /// Pops a list value from the stack and pushes the pointer/length onto
267        /// the stack. This operation also pops a block from the block stack
268        /// which is used as the iteration body of writing each element of the
269        /// list consumed.
270        ///
271        /// The `realloc` field here behaves the same way as `ListCanonLower`.
272        /// It's only set to `None` when a wasm module calls a declared import.
273        /// Otherwise lowering in other contexts requires allocating memory for
274        /// the receiver to own.
275        ListLower {
276            element: &'a Type,
277            realloc: Option<&'a str>,
278        } : [1] => [2],
279
280        /// Lifts a list which has a canonical representation into an interface
281        /// types value.
282        ///
283        /// The term "canonical" representation here means that the
284        /// representation of the interface types value in the native language
285        /// exactly matches the canonical ABI definition of the type.
286        ///
287        /// This will consume two `i32` values from the stack, a pointer and a
288        /// length, and then produces an interface value list.
289        ListCanonLift {
290            element: &'a Type,
291            ty: TypeId,
292        } : [2] => [1],
293
294        /// Same as `ListCanonLift`, but used for strings
295        StringLift : [2] => [1],
296
297        /// Lifts a list into an interface types value.
298        ///
299        /// This will consume two `i32` values from the stack, a pointer and a
300        /// length, and then produces an interface value list.
301        ///
302        /// This will also pop a block from the block stack which is how to
303        /// read each individual element from the list.
304        ListLift {
305            element: &'a Type,
306            ty: TypeId,
307        } : [2] => [1],
308
309        /// Pushes an operand onto the stack representing the list item from
310        /// each iteration of the list.
311        ///
312        /// This is only used inside of blocks related to lowering lists.
313        IterElem { element: &'a Type } : [0] => [1],
314
315        /// Pushes an operand onto the stack representing the base pointer of
316        /// the next element in a list.
317        ///
318        /// This is used for both lifting and lowering lists.
319        IterBasePointer : [0] => [1],
320
321        // records and tuples
322
323        /// Pops a record value off the stack, decomposes the record to all of
324        /// its fields, and then pushes the fields onto the stack.
325        RecordLower {
326            record: &'a Record,
327            name: &'a str,
328            ty: TypeId,
329        } : [1] => [record.fields.len()],
330
331        /// Pops all fields for a record off the stack and then composes them
332        /// into a record.
333        RecordLift {
334            record: &'a Record,
335            name: &'a str,
336            ty: TypeId,
337        } : [record.fields.len()] => [1],
338
339        /// Create an `i32` from a handle.
340        HandleLower {
341            handle: &'a Handle,
342            name: &'a str,
343            ty: TypeId,
344        } : [1] => [1],
345
346        /// Create a handle from an `i32`.
347        HandleLift {
348            handle: &'a Handle,
349            name: &'a str,
350            ty: TypeId,
351        } : [1] => [1],
352
353        /// Create an `i32` from a future.
354        FutureLower {
355            payload: &'a Option<Type>,
356            ty: TypeId,
357        } : [1] => [1],
358
359        /// Create a future from an `i32`.
360        FutureLift {
361            payload: &'a Option<Type>,
362            ty: TypeId,
363        } : [1] => [1],
364
365        /// Create an `i32` from a stream.
366        StreamLower {
367            payload: &'a Option<Type>,
368            ty: TypeId,
369        } : [1] => [1],
370
371        /// Create a stream from an `i32`.
372        StreamLift {
373            payload: &'a Option<Type>,
374            ty: TypeId,
375        } : [1] => [1],
376
377        /// Create an `i32` from an error-context.
378        ErrorContextLower : [1] => [1],
379
380        /// Create an error-context from an `i32`.
381        ErrorContextLift : [1] => [1],
382
383        /// Pops a tuple value off the stack, decomposes the tuple to all of
384        /// its fields, and then pushes the fields onto the stack.
385        TupleLower {
386            tuple: &'a Tuple,
387            ty: TypeId,
388        } : [1] => [tuple.types.len()],
389
390        /// Pops all fields for a tuple off the stack and then composes them
391        /// into a tuple.
392        TupleLift {
393            tuple: &'a Tuple,
394            ty: TypeId,
395        } : [tuple.types.len()] => [1],
396
397        /// Converts a language-specific record-of-bools to a list of `i32`.
398        FlagsLower {
399            flags: &'a Flags,
400            name: &'a str,
401            ty: TypeId,
402        } : [1] => [flags.repr().count()],
403        /// Converts a list of native wasm `i32` to a language-specific
404        /// record-of-bools.
405        FlagsLift {
406            flags: &'a Flags,
407            name: &'a str,
408            ty: TypeId,
409        } : [flags.repr().count()] => [1],
410
411        // variants
412
413        /// This is a special instruction used by the `VariantLower`
414        /// instruction to determine the name of the payload, if present, to use
415        /// within each block.
416        ///
417        /// Each sub-block will have this be the first instruction, and if it
418        /// lowers a payload it will expect something bound to this name.
419        VariantPayloadName : [0] => [1],
420
421        /// Pops a variant off the stack as well as `variant.cases.len()` blocks
422        /// from the code generator. Uses each of those blocks and the value
423        /// from the stack to produce `results.len()` items.
424        VariantLower {
425            variant: &'a Variant,
426            name: &'a str,
427            ty: TypeId,
428            results: &'a [WasmType],
429        } : [1] => [results.len()],
430
431        /// Pops an `i32` off the stack as well as `variant.cases.len()` blocks
432        /// from the code generator. Uses each of those blocks and the value
433        /// from the stack to produce a final variant.
434        VariantLift {
435            variant: &'a Variant,
436            name: &'a str,
437            ty: TypeId,
438        } : [1] => [1],
439
440        /// Pops an enum off the stack and pushes the `i32` representation.
441        EnumLower {
442            enum_: &'a Enum,
443            name: &'a str,
444            ty: TypeId,
445        } : [1] => [1],
446
447        /// Pops an `i32` off the stack and lifts it into the `enum` specified.
448        EnumLift {
449            enum_: &'a Enum,
450            name: &'a str,
451            ty: TypeId,
452        } : [1] => [1],
453
454        /// Specialization of `VariantLower` specifically for `option<T>` types;
455        /// otherwise behaves the same as `VariantLower` (e.g. two blocks for
456        /// the two cases).
457        OptionLower {
458            payload: &'a Type,
459            ty: TypeId,
460            results: &'a [WasmType],
461        } : [1] => [results.len()],
462
463        /// Specialization of `VariantLift` for specifically the `option<T>`
464        /// type. Otherwise behaves the same as the `VariantLift` instruction
465        /// with two blocks for the lift.
466        OptionLift {
467            payload: &'a Type,
468            ty: TypeId,
469        } : [1] => [1],
470
471        /// Specialization of `VariantLower` specifically for `result<T, E>`
472        /// types; otherwise behaves the same as `VariantLower` (e.g. two blocks
473        /// for the two cases).
474        ResultLower {
475            result: &'a Result_,
476            ty: TypeId,
477            results: &'a [WasmType],
478        } : [1] => [results.len()],
479
480        /// Specialization of `VariantLift` for specifically the `result<T,
481        /// E>` type. Otherwise behaves the same as the `VariantLift`
482        /// instruction with two blocks for the lift.
483        ResultLift {
484            result: &'a Result_,
485            ty: TypeId,
486        } : [1] => [1],
487
488        // calling/control flow
489
490        /// Represents a call to a raw WebAssembly API. The function name and
491        /// core wasm signature are provided inline.
492        CallWasm {
493            name: &'a str,
494            sig: &'a WasmSignature,
495        } : [sig.params.len()] => [sig.results.len()],
496
497        /// Same as `CallWasm`, except the dual where an interface is being
498        /// called rather than a raw wasm function.
499        ///
500        /// Note that this will be used for async functions.
501        CallInterface {
502            func: &'a Function,
503            async_: bool,
504        } : [func.params.len()] => [if *async_ { 1 } else { usize::from(func.result.is_some()) }],
505
506        /// Returns `amt` values on the stack. This is always the last
507        /// instruction.
508        Return { amt: usize, func: &'a Function } : [*amt] => [0],
509
510        /// Calls the `realloc` function specified in a malloc-like fashion
511        /// allocating `size` bytes with alignment `align`.
512        ///
513        /// Pushes the returned pointer onto the stack.
514        Malloc {
515            realloc: &'static str,
516            size: usize,
517            align: usize,
518        } : [0] => [1],
519
520        /// Used exclusively for guest-code generation, this indicates that
521        /// the standard memory deallocation function needs to be invoked with
522        /// the specified parameters.
523        ///
524        /// This will pop a pointer from the stack and push nothing.
525        GuestDeallocate {
526            size: usize,
527            align: usize,
528        } : [1] => [0],
529
530        /// Used exclusively for guest-code generation, this indicates that
531        /// a string is being deallocated. The ptr/length are on the stack and
532        /// are popped off and used to deallocate the string.
533        GuestDeallocateString : [2] => [0],
534
535        /// Used exclusively for guest-code generation, this indicates that
536        /// a list is being deallocated. The ptr/length are on the stack and
537        /// are popped off and used to deallocate the list.
538        ///
539        /// This variant also pops a block off the block stack to be used as the
540        /// body of the deallocation loop.
541        GuestDeallocateList {
542            element: &'a Type,
543        } : [2] => [0],
544
545        /// Used exclusively for guest-code generation, this indicates that
546        /// a variant is being deallocated. The integer discriminant is popped
547        /// off the stack as well as `blocks` number of blocks popped from the
548        /// blocks stack. The variant is used to select, at runtime, which of
549        /// the blocks is executed to deallocate the variant.
550        GuestDeallocateVariant {
551            blocks: usize,
552        } : [1] => [0],
553
554        /// Allocate the parameter and/or return areas to use for an
555        /// async-lowered import call.
556        ///
557        /// This cannot be allocated on the (shadow-)stack since it needs to
558        /// remain valid until the callee has finished using the buffers, which
559        /// may be after we pop the current stack frame.
560        AsyncMalloc { size: usize, align: usize } : [0] => [1],
561
562        /// Call an async-lowered import.
563        ///
564        /// `size` and `align` are used to deallocate the parameter area
565        /// allocated using `AsyncMalloc` after the callee task returns a value.
566        AsyncCallWasm { name: &'a str, size: usize, align: usize } : [2] => [0],
567
568        /// Generate code to run after `CallInterface` for an async-lifted export.
569        ///
570        /// For example, this might include task management for the
571        /// future/promise/task returned by the call made for `CallInterface`.
572        AsyncPostCallInterface { func: &'a Function } : [1] => [usize::from(func.result.is_some()) + 1],
573
574        /// Call `task.return` for an async-lifted export once the task returned
575        /// by `CallInterface` and managed by `AsyncPostCallInterface`
576        /// yields a value.
577        AsyncCallReturn { name: &'a str, params: &'a [WasmType] } : [params.len()] => [0],
578
579        /// Force the evaluation of the specified number of expressions and push
580        /// the results to the stack.
581        ///
582        /// This is useful prior to disposing of temporary variables and/or
583        /// allocations which are referenced by one or more not-yet-evaluated
584        /// expressions.
585        Flush { amt: usize } : [*amt] => [*amt],
586    }
587}
588
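/// A single cast between core wasm value representations, used by the
/// `Bitcasts` instruction to reinterpret stack operands.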
589#[derive(Debug, PartialEq)]
590pub enum Bitcast {
591    // Upcasts
592    F32ToI32,
593    F64ToI64,
594    I32ToI64,
595    F32ToI64,
596
597    // Downcasts
598    I32ToF32,
599    I64ToF64,
600    I64ToI32,
601    I64ToF32,
602
603    // PointerOrI64 conversions. These preserve provenance when the source
604    // or destination is a pointer value.
605    //
606    // These are used when pointer values are being stored in
607    // (ToP64) and loaded out of (P64To) PointerOrI64 values, so they
608    // always have to preserve provenance when the value being loaded or
609    // stored is a pointer.
610    P64ToI64,
611    I64ToP64,
612    P64ToP,
613    PToP64,
614
615    // Pointer<->number conversions. These do not preserve provenance.
616    //
617    // These are used when integer or floating-point values are being stored in
618    // (I32ToP/etc.) and loaded out of (PToI32/etc.) pointer values, so they
619    // never have any provenance to preserve.
620    I32ToP,
621    PToI32,
622    PToL,
623    LToP,
624
625    // Number<->Number conversions.
626    I32ToL,
627    LToI32,
628    I64ToL,
629    LToI64,
630
631    // Multiple conversions in sequence.
632    Sequence(Box<[Bitcast; 2]>),
633
634    None,
635}
636
637/// Whether the glue code surrounding a call is lifting arguments and lowering
638/// results or vice versa.
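///
/// For example, generating the guest-side wrapper that calls one of the
/// guest's own imports uses `LowerArgsLiftResults` (guest values are lowered
/// to core wasm values, the raw import is called, and its results are lifted
/// back), while generating the guest-side wrapper for an export uses
/// `LiftArgsLowerResults`.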
639#[derive(Clone, Copy, PartialEq, Eq)]
640pub enum LiftLower {
641    /// When the glue code lifts arguments and lowers results.
642    ///
643    /// ```text
644    /// Wasm --lift-args--> SourceLanguage; call; SourceLanguage --lower-results--> Wasm
645    /// ```
646    LiftArgsLowerResults,
647    /// When the glue code lowers arguments and lifts results.
648    ///
649    /// ```text
650    /// SourceLanguage --lower-args--> Wasm; call; Wasm --lift-results--> SourceLanguage
651    /// ```
652    LowerArgsLiftResults,
653}
654
655/// Trait for language implementors to use to generate glue code between native
656/// WebAssembly signatures and interface types signatures.
657///
658/// This is used as an implementation detail in interpreting the ABI between
659/// interface types and wasm types. Eventually this will be driven by interface
660/// types adapters themselves, but for now the ABI of a function dictates what
661/// instructions are fed in.
662///
663/// Types implementing `Bindgen` are incrementally fed `Instruction` values to
664/// generate code for. Instructions operate like a stack machine where each
665/// instruction has a list of inputs and a list of outputs (provided by the
666/// `emit` function).
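///
/// A very rough sketch of an implementation whose code fragments are plain
/// strings is shown below; it is illustrative only (`StringBindgen`, its
/// fields, and the text it records are made up) and is not compiled as part
/// of this crate:
///
/// ```ignore
/// use wit_bindgen_core::abi::{Bindgen, Instruction};
/// use wit_parser::{Resolve, SizeAlign, Type};
///
/// struct StringBindgen {
///     src: String,       // generated source text
///     sizes: SizeAlign,  // layout information for all types
///     tmp: usize,        // counter for unique operand names
/// }
///
/// impl Bindgen for StringBindgen {
///     type Operand = String;
///
///     fn emit(
///         &mut self,
///         _resolve: &Resolve,
///         inst: &Instruction<'_>,
///         operands: &mut Vec<String>,
///         results: &mut Vec<String>,
///     ) {
///         // Record what would be generated for this instruction and push
///         // exactly `inst.results_len()` freshly-named result operands.
///         self.src.push_str(&format!("// {inst:?} on {operands:?}\n"));
///         for _ in 0..inst.results_len() {
///             results.push(format!("tmp{}", self.tmp));
///             self.tmp += 1;
///         }
///     }
///
///     fn return_pointer(&mut self, _size: usize, _align: usize) -> String {
///         "ret_area".to_string()
///     }
///     fn push_block(&mut self) {}
///     fn finish_block(&mut self, _operands: &mut Vec<String>) {}
///     fn sizes(&self) -> &SizeAlign {
///         &self.sizes
///     }
///     fn is_list_canonical(&self, _: &Resolve, _: &Type) -> bool {
///         false
///     }
/// }
/// ```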
667pub trait Bindgen {
668    /// The intermediate type for fragments of code for this type.
669    ///
670    /// For most languages `String` is a suitable intermediate type.
671    type Operand: Clone;
672
673    /// Emit code to implement the given instruction.
674    ///
675    /// Each operand is given in `operands` and can be popped off if ownership
676    /// is required. It's guaranteed that `operands` has the appropriate length
677    /// for the `inst` given, as specified with [`Instruction`].
678    ///
679    /// Each result variable should be pushed onto `results`. This function must
680    /// push the appropriate number of results or binding generation will panic.
681    fn emit(
682        &mut self,
683        resolve: &Resolve,
684        inst: &Instruction<'_>,
685        operands: &mut Vec<Self::Operand>,
686        results: &mut Vec<Self::Operand>,
687    );
688
689    /// Gets an operand reference to the return pointer area.
690    ///
691    /// The provided size and alignment are for the function's return type.
692    fn return_pointer(&mut self, size: usize, align: usize) -> Self::Operand;
693
694    /// Enters a new block of code to generate code for.
695    ///
696    /// This is currently exclusively used for constructing variants. When a
697    /// variant is constructed a block here will be pushed for each case of a
698    /// variant, generating the code necessary to translate a variant case.
699    ///
700    /// Blocks are completed with `finish_block` below. It's expected that `emit`
701    /// will always push code (if necessary) into the "current block", which is
702    /// updated by calling this method and `finish_block` below.
703    fn push_block(&mut self);
704
705    /// Indicates to the code generator that a block is completed, and the
706    /// `operand` values specified are the results of the block.
707    ///
708    /// This method will be used to compute the value of each arm of lifting a
709    /// variant. The `operand` vector will be empty if the variant case didn't
710    /// actually have any type associated with it. Otherwise it will contain
711    /// the last value remaining on the stack, representing the value
712    /// associated with a variant's `case`.
713    ///
714    /// It's expected that this will resume code generation in the previous
715    /// block before `push_block` was called. This must also save the results
716    /// of the current block internally for instructions like `ResultLift` to
717    /// use later.
718    fn finish_block(&mut self, operand: &mut Vec<Self::Operand>);
719
720    /// Returns size information that was previously calculated for all types.
721    fn sizes(&self) -> &SizeAlign;
722
723    /// Returns whether or not the specified element type is represented in a
724    /// "canonical" form for lists. This dictates whether the `ListCanonLower`
725    /// and `ListCanonLift` instructions are used or not.
726    fn is_list_canonical(&self, resolve: &Resolve, element: &Type) -> bool;
727}
728
729/// Generates an abstract sequence of instructions which represents this
730/// function being adapted as an imported function.
731///
732/// The instructions here, when executed, will emulate a language with
733/// interface types calling the concrete wasm implementation. The parameters
734/// for the returned instruction sequence are the language's own
735/// interface-types parameters. One instruction in the instruction stream
736/// will be a `CallWasm` which represents calling the actual raw wasm
737/// function.
738///
739/// This function is useful, for example, if you're building a language
740/// generator for WASI bindings. This will document how to translate
741/// language-specific values into the wasm types to call a WASI function,
742/// and it will also automatically convert the results of the WASI function
743/// back to a language-specific value.
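///
/// A minimal sketch of driving this function (the `resolve`, `func`, and
/// `bindgen` values are assumed to already exist and are illustrative only):
///
/// ```ignore
/// // Generate the glue for a guest calling one of its imports: arguments are
/// // lowered to core wasm values, the raw import is called, and its results
/// // are lifted back into language-level values.
/// abi::call(
///     &resolve,
///     AbiVariant::GuestImport,
///     LiftLower::LowerArgsLiftResults,
///     &func,
///     &mut bindgen,
///     false, // not async
/// );
/// ```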
744pub fn call(
745    resolve: &Resolve,
746    variant: AbiVariant,
747    lift_lower: LiftLower,
748    func: &Function,
749    bindgen: &mut impl Bindgen,
750    async_: bool,
751) {
752    Generator::new(resolve, variant, lift_lower, bindgen, async_).call(func);
753}
754
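/// Lowers a single interface-types `value` into linear memory at `address`,
/// using the canonical ABI layout of `ty` and emitting instructions through
/// `bindgen`.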
755pub fn lower_to_memory<B: Bindgen>(
756    resolve: &Resolve,
757    bindgen: &mut B,
758    address: B::Operand,
759    value: B::Operand,
760    ty: &Type,
761) {
762    // TODO: refactor so we don't need to pass in a bunch of unused dummy parameters:
763    let mut generator = Generator::new(
764        resolve,
765        AbiVariant::GuestImport,
766        LiftLower::LowerArgsLiftResults,
767        bindgen,
768        true,
769    );
770    generator.stack.push(value);
771    generator.write_to_memory(ty, address, 0);
772}
773
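/// Lifts a single interface-types value of type `ty` out of linear memory at
/// `address`, emitting instructions through `bindgen` and returning the
/// resulting operand.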
774pub fn lift_from_memory<B: Bindgen>(
775    resolve: &Resolve,
776    bindgen: &mut B,
777    address: B::Operand,
778    ty: &Type,
779) -> B::Operand {
780    // TODO: refactor so we don't need to pass in a bunch of unused dummy parameters:
781    let mut generator = Generator::new(
782        resolve,
783        AbiVariant::GuestImport,
784        LiftLower::LowerArgsLiftResults,
785        bindgen,
786        true,
787    );
788    generator.read_from_memory(ty, address, 0);
789    generator.stack.pop().unwrap()
790}
791
792/// Used in a similar manner as the [`call`] function above, except it is
793/// used to generate the `post-return` callback for `func`.
794///
795/// This is only intended to be used in guest generators for exported
796/// functions and will primarily generate `GuestDeallocate*` instructions,
797/// plus others used as input to those instructions.
798pub fn post_return(resolve: &Resolve, func: &Function, bindgen: &mut impl Bindgen, async_: bool) {
799    Generator::new(
800        resolve,
801        AbiVariant::GuestExport,
802        LiftLower::LiftArgsLowerResults,
803        bindgen,
804        async_,
805    )
806    .post_return(func);
807}
808/// Returns whether the `Function` specified needs a post-return function to
809/// be generated in guest code.
810///
811/// This is primarily used when the return value contains a memory allocation
812/// such as a list or a string.
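///
/// For example, an export returning a `string` or a `list<u8>` needs a
/// post-return function so the returned buffer can be freed, while an export
/// returning only scalar values such as `u32` does not.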
813pub fn guest_export_needs_post_return(resolve: &Resolve, func: &Function) -> bool {
814    func.result
815        .map(|t| needs_post_return(resolve, &t))
816        .unwrap_or(false)
817}
818
819fn needs_post_return(resolve: &Resolve, ty: &Type) -> bool {
820    match ty {
821        Type::String => true,
822        Type::ErrorContext => true,
823        Type::Id(id) => match &resolve.types[*id].kind {
824            TypeDefKind::List(_) => true,
825            TypeDefKind::Type(t) => needs_post_return(resolve, t),
826            TypeDefKind::Handle(_) => false,
827            TypeDefKind::Resource => false,
828            TypeDefKind::Record(r) => r.fields.iter().any(|f| needs_post_return(resolve, &f.ty)),
829            TypeDefKind::Tuple(t) => t.types.iter().any(|t| needs_post_return(resolve, t)),
830            TypeDefKind::Variant(t) => t
831                .cases
832                .iter()
833                .filter_map(|t| t.ty.as_ref())
834                .any(|t| needs_post_return(resolve, t)),
835            TypeDefKind::Option(t) => needs_post_return(resolve, t),
836            TypeDefKind::Result(t) => [&t.ok, &t.err]
837                .iter()
838                .filter_map(|t| t.as_ref())
839                .any(|t| needs_post_return(resolve, t)),
840            TypeDefKind::Flags(_) | TypeDefKind::Enum(_) => false,
841            TypeDefKind::Future(_) | TypeDefKind::Stream(_) => false,
842            TypeDefKind::Unknown => unreachable!(),
843        },
844
845        Type::Bool
846        | Type::U8
847        | Type::S8
848        | Type::U16
849        | Type::S16
850        | Type::U32
851        | Type::S32
852        | Type::U64
853        | Type::S64
854        | Type::F32
855        | Type::F64
856        | Type::Char => false,
857    }
858}
859
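/// Internal driver which walks a function signature and feeds `Instruction`s
/// to a `Bindgen` implementation while maintaining the abstract operand stack.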
860struct Generator<'a, B: Bindgen> {
861    variant: AbiVariant,
862    lift_lower: LiftLower,
863    bindgen: &'a mut B,
864    async_: bool,
865    resolve: &'a Resolve,
866    operands: Vec<B::Operand>,
867    results: Vec<B::Operand>,
868    stack: Vec<B::Operand>,
869    return_pointer: Option<B::Operand>,
870}
871
872impl<'a, B: Bindgen> Generator<'a, B> {
873    fn new(
874        resolve: &'a Resolve,
875        variant: AbiVariant,
876        lift_lower: LiftLower,
877        bindgen: &'a mut B,
878        async_: bool,
879    ) -> Generator<'a, B> {
880        Generator {
881            resolve,
882            variant,
883            lift_lower,
884            bindgen,
885            async_,
886            operands: Vec::new(),
887            results: Vec::new(),
888            stack: Vec::new(),
889            return_pointer: None,
890        }
891    }
892
893    fn call(&mut self, func: &Function) {
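        // The canonical ABI flattens at most this many values into core wasm
        // parameters; anything larger is passed indirectly through memory.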
894        const MAX_FLAT_PARAMS: usize = 16;
895
896        let sig = self.resolve.wasm_signature(self.variant, func);
897
898        match self.lift_lower {
899            LiftLower::LowerArgsLiftResults => {
900                if let (AbiVariant::GuestExport, true) = (self.variant, self.async_) {
901                    unimplemented!("host-side code generation for async lift/lower not supported");
902                }
903
904                let lower_to_memory = |self_: &mut Self, ptr: B::Operand| {
905                    let mut offset = 0usize;
906                    for (nth, (_, ty)) in func.params.iter().enumerate() {
907                        self_.emit(&Instruction::GetArg { nth });
908                        offset = align_to(offset, self_.bindgen.sizes().align(ty).align_wasm32());
909                        self_.write_to_memory(ty, ptr.clone(), offset as i32);
910                        offset += self_.bindgen.sizes().size(ty).size_wasm32();
911                    }
912
913                    self_.stack.push(ptr);
914                };
915
916                let params_size_align = if self.async_ {
917                    let ElementInfo { size, align } = self
918                        .bindgen
919                        .sizes()
920                        .record(func.params.iter().map(|(_, ty)| ty));
921                    self.emit(&Instruction::AsyncMalloc {
922                        size: size.size_wasm32(),
923                        align: align.align_wasm32(),
924                    });
925                    let ptr = self.stack.pop().unwrap();
926                    lower_to_memory(self, ptr);
927                    Some((size, align))
928                } else {
929                    if !sig.indirect_params {
930                        // If the parameters for this function aren't indirect
931                        // (there aren't too many) then we simply do a normal lower
932                        // operation for them all.
933                        for (nth, (_, ty)) in func.params.iter().enumerate() {
934                            self.emit(&Instruction::GetArg { nth });
935                            self.lower(ty);
936                        }
937                    } else {
938                        // ... otherwise if parameters are indirect then space
939                        // is allocated for them and each argument is lowered
940                        // individually into memory.
941                        let info = self
942                            .bindgen
943                            .sizes()
944                            .record(func.params.iter().map(|t| &t.1));
945                        let ptr = match self.variant {
946                            // When a wasm module calls an import it will provide
947                            // space that isn't explicitly deallocated.
948                            AbiVariant::GuestImport => self
949                                .bindgen
950                                .return_pointer(info.size.size_wasm32(), info.align.align_wasm32()),
951                            // When calling a wasm module from the outside, though,
952                            // malloc needs to be called.
953                            AbiVariant::GuestExport => {
954                                self.emit(&Instruction::Malloc {
955                                    realloc: "cabi_realloc",
956                                    size: info.size.size_wasm32(),
957                                    align: info.align.align_wasm32(),
958                                });
959                                self.stack.pop().unwrap()
960                            }
961                            AbiVariant::GuestImportAsync
962                            | AbiVariant::GuestExportAsync
963                            | AbiVariant::GuestExportAsyncStackful => {
964                                unreachable!()
965                            }
966                        };
967                        lower_to_memory(self, ptr);
968                    }
969                    None
970                };
971
972                // If necessary we may need to prepare a return pointer for
973                // this ABI.
974                let dealloc_size_align =
975                    if let Some((params_size, params_align)) = params_size_align {
976                        let ElementInfo { size, align } =
977                            self.bindgen.sizes().record(func.result.iter());
978                        self.emit(&Instruction::AsyncMalloc {
979                            size: size.size_wasm32(),
980                            align: align.align_wasm32(),
981                        });
982                        let ptr = self.stack.pop().unwrap();
983                        self.return_pointer = Some(ptr.clone());
984                        self.stack.push(ptr);
985
986                        assert_eq!(self.stack.len(), 2);
987                        self.emit(&Instruction::AsyncCallWasm {
988                            name: &format!("[async]{}", func.name),
989                            size: params_size.size_wasm32(),
990                            align: params_align.align_wasm32(),
991                        });
992                        Some((size, align))
993                    } else {
994                        if self.variant == AbiVariant::GuestImport && sig.retptr {
995                            let info = self.bindgen.sizes().params(&func.result);
996                            let ptr = self
997                                .bindgen
998                                .return_pointer(info.size.size_wasm32(), info.align.align_wasm32());
999                            self.return_pointer = Some(ptr.clone());
1000                            self.stack.push(ptr);
1001                        }
1002
1003                        assert_eq!(self.stack.len(), sig.params.len());
1004                        self.emit(&Instruction::CallWasm {
1005                            name: &func.name,
1006                            sig: &sig,
1007                        });
1008                        None
1009                    };
1010
1011                if !(sig.retptr || self.async_) {
1012                    // With no return pointer in use we can simply lift the
1013                    // result(s) of the function from the result of the core
1014                    // wasm function.
1015                    if let Some(ty) = &func.result {
1016                        self.lift(ty)
1017                    }
1018                } else {
1019                    let ptr = match self.variant {
1020                        // imports into guests means it's a wasm module
1021                        // calling an imported function. We supplied the
1022                        // return pointer as the last argument (saved in
1023                        // `self.return_pointer`) so we use that to read
1024                        // the result of the function from memory.
1025                        AbiVariant::GuestImport => {
1026                            assert!(sig.results.is_empty() || self.async_);
1027                            self.return_pointer.take().unwrap()
1028                        }
1029
1030                        // guest exports means that this is a host
1031                        // calling wasm so wasm returned a pointer to where
1032                        // the result is stored
1033                        AbiVariant::GuestExport => self.stack.pop().unwrap(),
1034
1035                        AbiVariant::GuestImportAsync
1036                        | AbiVariant::GuestExportAsync
1037                        | AbiVariant::GuestExportAsyncStackful => {
1038                            unreachable!()
1039                        }
1040                    };
1041
1042                    self.read_results_from_memory(&func.result, ptr.clone(), 0);
1043                    self.emit(&Instruction::Flush {
1044                        amt: usize::from(func.result.is_some()),
1045                    });
1046
1047                    if let Some((size, align)) = dealloc_size_align {
1048                        self.stack.push(ptr);
1049                        self.emit(&Instruction::GuestDeallocate {
1050                            size: size.size_wasm32(),
1051                            align: align.align_wasm32(),
1052                        });
1053                    }
1054                }
1055
1056                self.emit(&Instruction::Return {
1057                    func,
1058                    amt: usize::from(func.result.is_some()),
1059                });
1060            }
1061            LiftLower::LiftArgsLowerResults => {
1062                if let (AbiVariant::GuestImport, true) = (self.variant, self.async_) {
1063                    todo!("implement host-side support for async lift/lower");
1064                }
1065
1066                let read_from_memory = |self_: &mut Self| {
1067                    let mut offset = 0usize;
1068                    let ptr = self_.stack.pop().unwrap();
1069                    for (_, ty) in func.params.iter() {
1070                        offset = align_to(offset, self_.bindgen.sizes().align(ty).align_wasm32());
1071                        self_.read_from_memory(ty, ptr.clone(), offset as i32);
1072                        offset += self_.bindgen.sizes().size(ty).size_wasm32();
1073                    }
1074                };
1075
1076                if !sig.indirect_params {
1077                    // If parameters are not passed indirectly then we lift each
1078                    // argument in succession from the flat core wasm values that
1079                    // make up the parameter.
1080                    let mut offset = 0;
1081                    let mut temp = Vec::new();
1082                    for (_, ty) in func.params.iter() {
1083                        temp.truncate(0);
1084                        self.resolve.push_flat(ty, &mut temp);
1085                        for _ in 0..temp.len() {
1086                            self.emit(&Instruction::GetArg { nth: offset });
1087                            offset += 1;
1088                        }
1089                        self.lift(ty);
1090                    }
1091                } else {
1092                    // ... otherwise arguments are read in succession from memory,
1093                    // where the pointer to the arguments is the first argument
1094                    // to the function.
1095                    self.emit(&Instruction::GetArg { nth: 0 });
1096                    read_from_memory(self);
1097                }
1098
1099                // ... and that allows us to call the interface types function
1100                self.emit(&Instruction::CallInterface {
1101                    func,
1102                    async_: self.async_,
1103                });
1104
1105                let (lower_to_memory, async_results) = if self.async_ {
1106                    self.emit(&Instruction::AsyncPostCallInterface { func });
1107
1108                    let mut results = Vec::new();
1109                    if let Some(ty) = &func.result {
1110                        self.resolve.push_flat(ty, &mut results);
1111                    }
1112                    (results.len() > MAX_FLAT_PARAMS, Some(results))
1113                } else {
1114                    (sig.retptr, None)
1115                };
1116
1117                // This was dynamically allocated by the caller (or async start
1118                // function) so after it's been read by the guest we need to
1119                // deallocate it.
1120                if let AbiVariant::GuestExport = self.variant {
1121                    if sig.indirect_params && !self.async_ {
1122                        let info = self
1123                            .bindgen
1124                            .sizes()
1125                            .record(func.params.iter().map(|t| &t.1));
1126                        self.emit(&Instruction::GetArg { nth: 0 });
1127                        self.emit(&Instruction::GuestDeallocate {
1128                            size: info.size.size_wasm32(),
1129                            align: info.align.align_wasm32(),
1130                        });
1131                    }
1132                }
1133
1134                if !lower_to_memory {
1135                    // With no return pointer in use we simply lower the
1136                    // result(s) and return that directly from the function.
1137                    if let Some(ty) = &func.result {
1138                        self.lower(ty);
1139                    }
1140                } else {
1141                    match self.variant {
1142                        // When a function is imported to a guest this means
1143                        // it's a host providing the implementation of the
1144                        // import. The result is stored in the pointer
1145                        // specified in the last argument, so we get the
1146                        // pointer here and then write the return value into
1147                        // it.
1148                        AbiVariant::GuestImport => {
1149                            self.emit(&Instruction::GetArg {
1150                                nth: sig.params.len() - 1,
1151                            });
1152                            let ptr = self.stack.pop().unwrap();
1153                            self.write_params_to_memory(&func.result, ptr, 0);
1154                        }
1155
1156                        // For a guest export this is a function defined in
1157                        // wasm, so we're returning a pointer to where the
1158                        // value was stored. Allocate some space here
1159                        // (statically) and then write the result into that
1160                        // memory, returning the pointer at the end.
1161                        AbiVariant::GuestExport => {
1162                            let info = self.bindgen.sizes().params(&func.result);
1163                            let ptr = self
1164                                .bindgen
1165                                .return_pointer(info.size.size_wasm32(), info.align.align_wasm32());
1166                            self.write_params_to_memory(&func.result, ptr.clone(), 0);
1167                            self.stack.push(ptr);
1168                        }
1169
1170                        AbiVariant::GuestImportAsync
1171                        | AbiVariant::GuestExportAsync
1172                        | AbiVariant::GuestExportAsyncStackful => {
1173                            unreachable!()
1174                        }
1175                    }
1176                }
1177
1178                if let Some(results) = async_results {
1179                    let name = &format!("[task-return]{}", func.name);
1180
1181                    self.emit(&Instruction::AsyncCallReturn {
1182                        name,
1183                        params: &if results.len() > MAX_FLAT_PARAMS {
1184                            vec![WasmType::Pointer]
1185                        } else {
1186                            results
1187                        },
1188                    });
1189                    self.emit(&Instruction::Return { func, amt: 1 });
1190                } else {
1191                    self.emit(&Instruction::Return {
1192                        func,
1193                        amt: sig.results.len(),
1194                    });
1195                }
1196            }
1197        }
1198
1199        assert!(
1200            self.stack.is_empty(),
1201            "stack has {} items remaining",
1202            self.stack.len()
1203        );
1204    }
1205
1206    fn post_return(&mut self, func: &Function) {
1207        let sig = self.resolve.wasm_signature(self.variant, func);
1208
1209        // Currently post-return is only used for lists and lists are always
1210        // returned indirectly through memory due to their flat representation
1211        // having more than one type. Assert that a return pointer is used,
1212        // though, in case this ever changes.
1213        assert!(sig.retptr);
1214
1215        self.emit(&Instruction::GetArg { nth: 0 });
1216        let addr = self.stack.pop().unwrap();
1217        for (offset, ty) in self.bindgen.sizes().field_offsets(&func.result) {
1218            let offset = offset.size_wasm32();
1219            let offset = i32::try_from(offset).unwrap();
1220            self.deallocate(ty, addr.clone(), offset);
1221        }
1222        self.emit(&Instruction::Return { func, amt: 0 });
1223
1224        assert!(
1225            self.stack.is_empty(),
1226            "stack has {} items remaining",
1227            self.stack.len()
1228        );
1229    }
1230
1231    fn emit(&mut self, inst: &Instruction<'_>) {
1232        self.operands.clear();
1233        self.results.clear();
1234
1235        let operands_len = inst.operands_len();
1236        assert!(
1237            self.stack.len() >= operands_len,
1238            "not enough operands on stack for {:?}",
1239            inst
1240        );
1241        self.operands
1242            .extend(self.stack.drain((self.stack.len() - operands_len)..));
1243        self.results.reserve(inst.results_len());
1244
1245        self.bindgen
1246            .emit(self.resolve, inst, &mut self.operands, &mut self.results);
1247
1248        assert_eq!(
1249            self.results.len(),
1250            inst.results_len(),
1251            "{:?} expected {} results, got {}",
1252            inst,
1253            inst.results_len(),
1254            self.results.len()
1255        );
1256        self.stack.append(&mut self.results);
1257    }
1258
1259    fn push_block(&mut self) {
1260        self.bindgen.push_block();
1261    }
1262
1263    fn finish_block(&mut self, size: usize) {
1264        self.operands.clear();
1265        assert!(
1266            size <= self.stack.len(),
1267            "not enough operands on stack for finishing block",
1268        );
1269        self.operands
1270            .extend(self.stack.drain((self.stack.len() - size)..));
1271        self.bindgen.finish_block(&mut self.operands);
1272    }
1273
1274    fn lower(&mut self, ty: &Type) {
1275        use Instruction::*;
1276
1277        match *ty {
1278            Type::Bool => self.emit(&I32FromBool),
1279            Type::S8 => self.emit(&I32FromS8),
1280            Type::U8 => self.emit(&I32FromU8),
1281            Type::S16 => self.emit(&I32FromS16),
1282            Type::U16 => self.emit(&I32FromU16),
1283            Type::S32 => self.emit(&I32FromS32),
1284            Type::U32 => self.emit(&I32FromU32),
1285            Type::S64 => self.emit(&I64FromS64),
1286            Type::U64 => self.emit(&I64FromU64),
1287            Type::Char => self.emit(&I32FromChar),
1288            Type::F32 => self.emit(&CoreF32FromF32),
1289            Type::F64 => self.emit(&CoreF64FromF64),
1290            Type::String => {
1291                let realloc = self.list_realloc();
1292                self.emit(&StringLower { realloc });
1293            }
1294            Type::ErrorContext => self.emit(&ErrorContextLower),
1295            Type::Id(id) => match &self.resolve.types[id].kind {
1296                TypeDefKind::Type(t) => self.lower(t),
1297                TypeDefKind::List(element) => {
1298                    let realloc = self.list_realloc();
1299                    if self.bindgen.is_list_canonical(self.resolve, element) {
1300                        self.emit(&ListCanonLower { element, realloc });
1301                    } else {
1302                        self.push_block();
1303                        self.emit(&IterElem { element });
1304                        self.emit(&IterBasePointer);
1305                        let addr = self.stack.pop().unwrap();
1306                        self.write_to_memory(element, addr, 0);
1307                        self.finish_block(0);
1308                        self.emit(&ListLower { element, realloc });
1309                    }
1310                }
1311                TypeDefKind::Handle(handle) => {
1312                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
1313                    self.emit(&HandleLower {
1314                        handle,
1315                        ty: id,
1316                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
1317                    });
1318                }
1319                TypeDefKind::Resource => {
1320                    todo!();
1321                }
1322                TypeDefKind::Record(record) => {
1323                    self.emit(&RecordLower {
1324                        record,
1325                        ty: id,
1326                        name: self.resolve.types[id].name.as_deref().unwrap(),
1327                    });
1328                    let values = self
1329                        .stack
1330                        .drain(self.stack.len() - record.fields.len()..)
1331                        .collect::<Vec<_>>();
1332                    for (field, value) in record.fields.iter().zip(values) {
1333                        self.stack.push(value);
1334                        self.lower(&field.ty);
1335                    }
1336                }
1337                TypeDefKind::Tuple(tuple) => {
1338                    self.emit(&TupleLower { tuple, ty: id });
1339                    let values = self
1340                        .stack
1341                        .drain(self.stack.len() - tuple.types.len()..)
1342                        .collect::<Vec<_>>();
1343                    for (ty, value) in tuple.types.iter().zip(values) {
1344                        self.stack.push(value);
1345                        self.lower(ty);
1346                    }
1347                }
1348
1349                TypeDefKind::Flags(flags) => {
1350                    self.emit(&FlagsLower {
1351                        flags,
1352                        ty: id,
1353                        name: self.resolve.types[id].name.as_ref().unwrap(),
1354                    });
1355                }
1356
1357                TypeDefKind::Variant(v) => {
1358                    let results =
1359                        self.lower_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
1360                    self.emit(&VariantLower {
1361                        variant: v,
1362                        ty: id,
1363                        results: &results,
1364                        name: self.resolve.types[id].name.as_deref().unwrap(),
1365                    });
1366                }
1367                TypeDefKind::Enum(enum_) => {
1368                    self.emit(&EnumLower {
1369                        enum_,
1370                        ty: id,
1371                        name: self.resolve.types[id].name.as_deref().unwrap(),
1372                    });
1373                }
1374                TypeDefKind::Option(t) => {
1375                    let results = self.lower_variant_arms(ty, [None, Some(t)]);
1376                    self.emit(&OptionLower {
1377                        payload: t,
1378                        ty: id,
1379                        results: &results,
1380                    });
1381                }
1382                TypeDefKind::Result(r) => {
1383                    let results = self.lower_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
1384                    self.emit(&ResultLower {
1385                        result: r,
1386                        ty: id,
1387                        results: &results,
1388                    });
1389                }
1390                TypeDefKind::Future(ty) => {
1391                    self.emit(&FutureLower {
1392                        payload: ty,
1393                        ty: id,
1394                    });
1395                }
1396                TypeDefKind::Stream(ty) => {
1397                    self.emit(&StreamLower {
1398                        payload: ty,
1399                        ty: id,
1400                    });
1401                }
1402                TypeDefKind::Unknown => unreachable!(),
1403            },
1404        }
1405    }
1406
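    /// Lowers each case of a variant-like type in its own block, bitcasting
    /// and zero-padding every block so that all cases produce the same flat
    /// list of core wasm types. Returns that joined list of types.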
1407    fn lower_variant_arms<'b>(
1408        &mut self,
1409        ty: &Type,
1410        cases: impl IntoIterator<Item = Option<&'b Type>>,
1411    ) -> Vec<WasmType> {
1412        use Instruction::*;
1413        let mut results = Vec::new();
1414        let mut temp = Vec::new();
1415        let mut casts = Vec::new();
1416        self.resolve.push_flat(ty, &mut results);
1417        for (i, ty) in cases.into_iter().enumerate() {
1418            self.push_block();
1419            self.emit(&VariantPayloadName);
1420            let payload_name = self.stack.pop().unwrap();
1421            self.emit(&I32Const { val: i as i32 });
1422            let mut pushed = 1;
1423            if let Some(ty) = ty {
1424                // Using the payload of this block we lower the type to
1425                // raw wasm values.
1426                self.stack.push(payload_name);
1427                self.lower(ty);
1428
1429                // Determine the types of all the wasm values we just
1430                // pushed, and record how many. If we pushed too few
1431                // then we'll need to push some zeros after this.
1432                temp.truncate(0);
1433                self.resolve.push_flat(ty, &mut temp);
1434                pushed += temp.len();
1435
1436                // For all the types pushed we may need to insert some
1437                // bitcasts. This will go through and cast everything
1438                // to the right type to ensure all blocks produce the
1439                // same set of results.
1440                casts.truncate(0);
1441                for (actual, expected) in temp.iter().zip(&results[1..]) {
1442                    casts.push(cast(*actual, *expected));
1443                }
1444                if casts.iter().any(|c| *c != Bitcast::None) {
1445                    self.emit(&Bitcasts { casts: &casts });
1446                }
1447            }
1448
1449            // If we haven't pushed enough items in this block to match
1450            // what other variants are pushing then we need to push
1451            // some zeros.
1452            if pushed < results.len() {
1453                self.emit(&ConstZero {
1454                    tys: &results[pushed..],
1455                });
1456            }
1457            self.finish_block(results.len());
1458        }
1459        results
1460    }
1461
1462    fn list_realloc(&self) -> Option<&'static str> {
1463        // When lowering parameters to call a wasm import, or returning a
1464        // result from an async-lifted wasm export, we don't need to pass
1465        // ownership; in all other cases we pass ownership.
1466        match (self.variant, self.lift_lower, self.async_) {
1467            (AbiVariant::GuestImport, LiftLower::LowerArgsLiftResults, _)
1468            | (AbiVariant::GuestExport, LiftLower::LiftArgsLowerResults, true) => None,
1469            _ => Some("cabi_realloc"),
1470        }
1471    }
1472
1473    /// Note that in general everything in this function is the opposite of the
1474    /// `lower` function above. This is intentional and should be kept this way!
1475    fn lift(&mut self, ty: &Type) {
1476        use Instruction::*;
1477
1478        match *ty {
1479            Type::Bool => self.emit(&BoolFromI32),
1480            Type::S8 => self.emit(&S8FromI32),
1481            Type::U8 => self.emit(&U8FromI32),
1482            Type::S16 => self.emit(&S16FromI32),
1483            Type::U16 => self.emit(&U16FromI32),
1484            Type::S32 => self.emit(&S32FromI32),
1485            Type::U32 => self.emit(&U32FromI32),
1486            Type::S64 => self.emit(&S64FromI64),
1487            Type::U64 => self.emit(&U64FromI64),
1488            Type::Char => self.emit(&CharFromI32),
1489            Type::F32 => self.emit(&F32FromCoreF32),
1490            Type::F64 => self.emit(&F64FromCoreF64),
1491            Type::String => self.emit(&StringLift),
1492            Type::ErrorContext => self.emit(&ErrorContextLift),
1493            Type::Id(id) => match &self.resolve.types[id].kind {
1494                TypeDefKind::Type(t) => self.lift(t),
1495                TypeDefKind::List(element) => {
1496                    if self.bindgen.is_list_canonical(self.resolve, element) {
1497                        self.emit(&ListCanonLift { element, ty: id });
1498                    } else {
1499                        self.push_block();
1500                        self.emit(&IterBasePointer);
1501                        let addr = self.stack.pop().unwrap();
1502                        self.read_from_memory(element, addr, 0);
1503                        self.finish_block(1);
1504                        self.emit(&ListLift { element, ty: id });
1505                    }
1506                }
1507                TypeDefKind::Handle(handle) => {
1508                    let (Handle::Own(ty) | Handle::Borrow(ty)) = handle;
1509                    self.emit(&HandleLift {
1510                        handle,
1511                        ty: id,
1512                        name: self.resolve.types[*ty].name.as_deref().unwrap(),
1513                    });
1514                }
1515                TypeDefKind::Resource => {
1516                    todo!();
1517                }
1518                TypeDefKind::Record(record) => {
1519                    let mut temp = Vec::new();
1520                    self.resolve.push_flat(ty, &mut temp);
1521                    let mut args = self
1522                        .stack
1523                        .drain(self.stack.len() - temp.len()..)
1524                        .collect::<Vec<_>>();
1525                    for field in record.fields.iter() {
1526                        temp.truncate(0);
1527                        self.resolve.push_flat(&field.ty, &mut temp);
1528                        self.stack.extend(args.drain(..temp.len()));
1529                        self.lift(&field.ty);
1530                    }
1531                    self.emit(&RecordLift {
1532                        record,
1533                        ty: id,
1534                        name: self.resolve.types[id].name.as_deref().unwrap(),
1535                    });
1536                }
1537                TypeDefKind::Tuple(tuple) => {
1538                    let mut temp = Vec::new();
1539                    self.resolve.push_flat(ty, &mut temp);
1540                    let mut args = self
1541                        .stack
1542                        .drain(self.stack.len() - temp.len()..)
1543                        .collect::<Vec<_>>();
1544                    for ty in tuple.types.iter() {
1545                        temp.truncate(0);
1546                        self.resolve.push_flat(ty, &mut temp);
1547                        self.stack.extend(args.drain(..temp.len()));
1548                        self.lift(ty);
1549                    }
1550                    self.emit(&TupleLift { tuple, ty: id });
1551                }
1552                TypeDefKind::Flags(flags) => {
1553                    self.emit(&FlagsLift {
1554                        flags,
1555                        ty: id,
1556                        name: self.resolve.types[id].name.as_ref().unwrap(),
1557                    });
1558                }
1559
1560                TypeDefKind::Variant(v) => {
1561                    self.lift_variant_arms(ty, v.cases.iter().map(|c| c.ty.as_ref()));
1562                    self.emit(&VariantLift {
1563                        variant: v,
1564                        ty: id,
1565                        name: self.resolve.types[id].name.as_deref().unwrap(),
1566                    });
1567                }
1568
1569                TypeDefKind::Enum(enum_) => {
1570                    self.emit(&EnumLift {
1571                        enum_,
1572                        ty: id,
1573                        name: self.resolve.types[id].name.as_deref().unwrap(),
1574                    });
1575                }
1576
1577                TypeDefKind::Option(t) => {
1578                    self.lift_variant_arms(ty, [None, Some(t)]);
1579                    self.emit(&OptionLift { payload: t, ty: id });
1580                }
1581
1582                TypeDefKind::Result(r) => {
1583                    self.lift_variant_arms(ty, [r.ok.as_ref(), r.err.as_ref()]);
1584                    self.emit(&ResultLift { result: r, ty: id });
1585                }
1586
1587                TypeDefKind::Future(ty) => {
1588                    self.emit(&FutureLift {
1589                        payload: ty,
1590                        ty: id,
1591                    });
1592                }
1593                TypeDefKind::Stream(ty) => {
1594                    self.emit(&StreamLift {
1595                        payload: ty,
1596                        ty: id,
1597                    });
1598                }
1599                TypeDefKind::Unknown => unreachable!(),
1600            },
1601        }
1602    }
1603
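    /// Inverse of `lower_variant_arms`: for each case, pushes just the core
    /// wasm values that case needs (bitcast back from the joined
    /// representation) and lifts them into the case's payload, one block per
    /// case.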
1604    fn lift_variant_arms<'b>(
1605        &mut self,
1606        ty: &Type,
1607        cases: impl IntoIterator<Item = Option<&'b Type>>,
1608    ) {
1609        let mut params = Vec::new();
1610        let mut temp = Vec::new();
1611        let mut casts = Vec::new();
1612        self.resolve.push_flat(ty, &mut params);
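        // `params` includes the discriminant as its first flat type; the
        // discriminant stays on the stack for the subsequent `*Lift`
        // instruction, so only the payload values are drained off here as the
        // block inputs.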
1613        let block_inputs = self
1614            .stack
1615            .drain(self.stack.len() + 1 - params.len()..)
1616            .collect::<Vec<_>>();
1617        for ty in cases {
1618            self.push_block();
1619            if let Some(ty) = ty {
1620                // Push only the values we need for this variant onto
1621                // the stack.
1622                temp.truncate(0);
1623                self.resolve.push_flat(ty, &mut temp);
1624                self.stack
1625                    .extend(block_inputs[..temp.len()].iter().cloned());
1626
1627                // Cast all the types we have on the stack to the actual
1628                // types needed for this variant, if necessary.
1629                casts.truncate(0);
1630                for (actual, expected) in temp.iter().zip(&params[1..]) {
1631                    casts.push(cast(*expected, *actual));
1632                }
1633                if casts.iter().any(|c| *c != Bitcast::None) {
1634                    self.emit(&Instruction::Bitcasts { casts: &casts });
1635                }
1636
1637                // Then recursively lift this variant's payload.
1638                self.lift(ty);
1639            }
1640            self.finish_block(ty.is_some() as usize);
1641        }
1642    }
1643
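    /// Lowers the interface-types value on top of the stack and stores its
    /// flat representation into linear memory at `addr` plus the constant
    /// `offset`.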
1644    fn write_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
1645        use Instruction::*;
1646
1647        match *ty {
1648            // Builtin types need different flavors of storage instructions
1649            // depending on the size of the value written.
1650            Type::Bool | Type::U8 | Type::S8 => {
1651                self.lower_and_emit(ty, addr, &I32Store8 { offset })
1652            }
1653            Type::U16 | Type::S16 => self.lower_and_emit(ty, addr, &I32Store16 { offset }),
1654            Type::U32 | Type::S32 | Type::Char => {
1655                self.lower_and_emit(ty, addr, &I32Store { offset })
1656            }
1657            Type::U64 | Type::S64 => self.lower_and_emit(ty, addr, &I64Store { offset }),
1658            Type::F32 => self.lower_and_emit(ty, addr, &F32Store { offset }),
1659            Type::F64 => self.lower_and_emit(ty, addr, &F64Store { offset }),
1660            Type::String => self.write_list_to_memory(ty, addr, offset),
1661            Type::ErrorContext => self.lower_and_emit(ty, addr, &I32Store { offset }),
1662
1663            Type::Id(id) => match &self.resolve.types[id].kind {
1664                TypeDefKind::Type(t) => self.write_to_memory(t, addr, offset),
1665                TypeDefKind::List(_) => self.write_list_to_memory(ty, addr, offset),
1666
1667                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
1668                    self.lower_and_emit(ty, addr, &I32Store { offset })
1669                }
1670
1671                // Decompose the record into its components and then write all
1672                // the components into memory one-by-one.
1673                TypeDefKind::Record(record) => {
1674                    self.emit(&RecordLower {
1675                        record,
1676                        ty: id,
1677                        name: self.resolve.types[id].name.as_deref().unwrap(),
1678                    });
1679                    self.write_fields_to_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
1680                }
1681                TypeDefKind::Resource => {
1682                    todo!()
1683                }
1684                TypeDefKind::Tuple(tuple) => {
1685                    self.emit(&TupleLower { tuple, ty: id });
1686                    self.write_fields_to_memory(tuple.types.iter(), addr, offset);
1687                }
1688
1689                TypeDefKind::Flags(f) => {
1690                    self.lower(ty);
1691                    match f.repr() {
1692                        FlagsRepr::U8 => {
1693                            self.stack.push(addr);
1694                            self.store_intrepr(offset, Int::U8);
1695                        }
1696                        FlagsRepr::U16 => {
1697                            self.stack.push(addr);
1698                            self.store_intrepr(offset, Int::U16);
1699                        }
1700                        FlagsRepr::U32(n) => {
1701                            for i in (0..n).rev() {
1702                                self.stack.push(addr.clone());
1703                                self.emit(&I32Store {
1704                                    offset: offset + (i as i32) * 4,
1705                                });
1706                            }
1707                        }
1708                    }
1709                }
1710
1711                // Each case gets its own block, and the first thing each block
1712                // does is write the discriminant. After that, if the case has a
1713                // payload, we write the payload after the discriminant, aligned up
1714                // to the payload's alignment.
1715                TypeDefKind::Variant(v) => {
1716                    self.write_variant_arms_to_memory(
1717                        offset,
1718                        addr,
1719                        v.tag(),
1720                        v.cases.iter().map(|c| c.ty.as_ref()),
1721                    );
1722                    self.emit(&VariantLower {
1723                        variant: v,
1724                        ty: id,
1725                        results: &[],
1726                        name: self.resolve.types[id].name.as_deref().unwrap(),
1727                    });
1728                }
1729
1730                TypeDefKind::Option(t) => {
1731                    self.write_variant_arms_to_memory(offset, addr, Int::U8, [None, Some(t)]);
1732                    self.emit(&OptionLower {
1733                        payload: t,
1734                        ty: id,
1735                        results: &[],
1736                    });
1737                }
1738
1739                TypeDefKind::Result(r) => {
1740                    self.write_variant_arms_to_memory(
1741                        offset,
1742                        addr,
1743                        Int::U8,
1744                        [r.ok.as_ref(), r.err.as_ref()],
1745                    );
1746                    self.emit(&ResultLower {
1747                        result: r,
1748                        ty: id,
1749                        results: &[],
1750                    });
1751                }
1752
1753                TypeDefKind::Enum(e) => {
1754                    self.lower(ty);
1755                    self.stack.push(addr);
1756                    self.store_intrepr(offset, e.tag());
1757                }
1758
1759                TypeDefKind::Unknown => unreachable!(),
1760            },
1761        }
1762    }
1763
1764    fn write_params_to_memory<'b>(
1765        &mut self,
1766        params: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
1767        addr: B::Operand,
1768        offset: i32,
1769    ) {
1770        self.write_fields_to_memory(params, addr, offset);
1771    }
1772
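    /// Writes a variant-like value to memory: each case gets a block which
    /// stores the discriminant `tag` at `offset` and then, if the case has a
    /// payload, writes that payload at the computed payload offset.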
1773    fn write_variant_arms_to_memory<'b>(
1774        &mut self,
1775        offset: i32,
1776        addr: B::Operand,
1777        tag: Int,
1778        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
1779    ) {
1780        let payload_offset = offset
1781            + (self
1782                .bindgen
1783                .sizes()
1784                .payload_offset(tag, cases.clone())
1785                .size_wasm32() as i32);
1786        for (i, ty) in cases.into_iter().enumerate() {
1787            self.push_block();
1788            self.emit(&Instruction::VariantPayloadName);
1789            let payload_name = self.stack.pop().unwrap();
1790            self.emit(&Instruction::I32Const { val: i as i32 });
1791            self.stack.push(addr.clone());
1792            self.store_intrepr(offset, tag);
1793            if let Some(ty) = ty {
1794                self.stack.push(payload_name.clone());
1795                self.write_to_memory(ty, addr.clone(), payload_offset);
1796            }
1797            self.finish_block(0);
1798        }
1799    }
1800
1801    fn write_list_to_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
1802        // After lowering the list there are two i32 values on the stack
1803        // which we write into memory: the pointer goes into the low address
1804        // and the length into the high address.
1805        self.lower(ty);
1806        self.stack.push(addr.clone());
1807        self.emit(&Instruction::LengthStore { offset: offset + 4 });
1808        self.stack.push(addr);
1809        self.emit(&Instruction::PointerStore { offset });
1810    }
1811
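    /// Writes a sequence of fields (already on the stack, one operand per
    /// field) into memory at their respective field offsets relative to
    /// `addr` plus `offset`.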
1812    fn write_fields_to_memory<'b>(
1813        &mut self,
1814        tys: impl IntoIterator<Item = &'b Type, IntoIter: ExactSizeIterator>,
1815        addr: B::Operand,
1816        offset: i32,
1817    ) {
1818        let tys = tys.into_iter();
1819        let fields = self
1820            .stack
1821            .drain(self.stack.len() - tys.len()..)
1822            .collect::<Vec<_>>();
1823        for ((field_offset, ty), op) in self
1824            .bindgen
1825            .sizes()
1826            .field_offsets(tys)
1827            .into_iter()
1828            .zip(fields)
1829        {
1830            self.stack.push(op);
1831            let field_offset = field_offset.size_wasm32();
1832            self.write_to_memory(ty, addr.clone(), offset + (field_offset as i32));
1833        }
1834    }
1835
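    /// Helper which lowers `ty`, pushes `addr`, and then emits the provided
    /// store instruction.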
1836    fn lower_and_emit(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
1837        self.lower(ty);
1838        self.stack.push(addr);
1839        self.emit(instr);
1840    }
1841
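    /// Loads the flat representation of `ty` from linear memory at `addr`
    /// plus the constant `offset` and lifts it back into an interface-types
    /// value on the stack.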
1842    fn read_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
1843        use Instruction::*;
1844
1845        match *ty {
1846            Type::Bool => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
1847            Type::U8 => self.emit_and_lift(ty, addr, &I32Load8U { offset }),
1848            Type::S8 => self.emit_and_lift(ty, addr, &I32Load8S { offset }),
1849            Type::U16 => self.emit_and_lift(ty, addr, &I32Load16U { offset }),
1850            Type::S16 => self.emit_and_lift(ty, addr, &I32Load16S { offset }),
1851            Type::U32 | Type::S32 | Type::Char => self.emit_and_lift(ty, addr, &I32Load { offset }),
1852            Type::U64 | Type::S64 => self.emit_and_lift(ty, addr, &I64Load { offset }),
1853            Type::F32 => self.emit_and_lift(ty, addr, &F32Load { offset }),
1854            Type::F64 => self.emit_and_lift(ty, addr, &F64Load { offset }),
1855            Type::String => self.read_list_from_memory(ty, addr, offset),
1856            Type::ErrorContext => self.emit_and_lift(ty, addr, &I32Load { offset }),
1857
1858            Type::Id(id) => match &self.resolve.types[id].kind {
1859                TypeDefKind::Type(t) => self.read_from_memory(t, addr, offset),
1860
1861                TypeDefKind::List(_) => self.read_list_from_memory(ty, addr, offset),
1862
1863                TypeDefKind::Future(_) | TypeDefKind::Stream(_) | TypeDefKind::Handle(_) => {
1864                    self.emit_and_lift(ty, addr, &I32Load { offset })
1865                }
1866
1867                TypeDefKind::Resource => {
1868                    todo!();
1869                }
1870
1871                // Read and lift each field individually, adjusting the offset
1872                // as we go along, then aggregate all the fields into the
1873                // record.
1874                TypeDefKind::Record(record) => {
1875                    self.read_fields_from_memory(record.fields.iter().map(|f| &f.ty), addr, offset);
1876                    self.emit(&RecordLift {
1877                        record,
1878                        ty: id,
1879                        name: self.resolve.types[id].name.as_deref().unwrap(),
1880                    });
1881                }
1882
1883                TypeDefKind::Tuple(tuple) => {
1884                    self.read_fields_from_memory(&tuple.types, addr, offset);
1885                    self.emit(&TupleLift { tuple, ty: id });
1886                }
1887
1888                TypeDefKind::Flags(f) => {
1889                    match f.repr() {
1890                        FlagsRepr::U8 => {
1891                            self.stack.push(addr);
1892                            self.load_intrepr(offset, Int::U8);
1893                        }
1894                        FlagsRepr::U16 => {
1895                            self.stack.push(addr);
1896                            self.load_intrepr(offset, Int::U16);
1897                        }
1898                        FlagsRepr::U32(n) => {
1899                            for i in 0..n {
1900                                self.stack.push(addr.clone());
1901                                self.emit(&I32Load {
1902                                    offset: offset + (i as i32) * 4,
1903                                });
1904                            }
1905                        }
1906                    }
1907                    self.lift(ty);
1908                }
1909
1910                // Each case will get its own block, and we'll dispatch to the
1911                // right block based on the discriminant we load first. Each
1912                // individual block is pretty simple and just reads the payload
1913                // from the corresponding offset, if one is available.
1914                TypeDefKind::Variant(variant) => {
1915                    self.read_variant_arms_from_memory(
1916                        offset,
1917                        addr,
1918                        variant.tag(),
1919                        variant.cases.iter().map(|c| c.ty.as_ref()),
1920                    );
1921                    self.emit(&VariantLift {
1922                        variant,
1923                        ty: id,
1924                        name: self.resolve.types[id].name.as_deref().unwrap(),
1925                    });
1926                }
1927
1928                TypeDefKind::Option(t) => {
1929                    self.read_variant_arms_from_memory(offset, addr, Int::U8, [None, Some(t)]);
1930                    self.emit(&OptionLift { payload: t, ty: id });
1931                }
1932
1933                TypeDefKind::Result(r) => {
1934                    self.read_variant_arms_from_memory(
1935                        offset,
1936                        addr,
1937                        Int::U8,
1938                        [r.ok.as_ref(), r.err.as_ref()],
1939                    );
1940                    self.emit(&ResultLift { result: r, ty: id });
1941                }
1942
1943                TypeDefKind::Enum(e) => {
1944                    self.stack.push(addr.clone());
1945                    self.load_intrepr(offset, e.tag());
1946                    self.lift(ty);
1947                }
1948
1949                TypeDefKind::Unknown => unreachable!(),
1950            },
1951        }
1952    }
1953
1954    fn read_results_from_memory(&mut self, result: &Option<Type>, addr: B::Operand, offset: i32) {
1955        self.read_fields_from_memory(result, addr, offset)
1956    }
1957
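    /// Reads a variant-like value from memory: loads the discriminant `tag`
    /// at `offset`, then emits one block per case which reads that case's
    /// payload, if any, from the payload offset.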
1958    fn read_variant_arms_from_memory<'b>(
1959        &mut self,
1960        offset: i32,
1961        addr: B::Operand,
1962        tag: Int,
1963        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
1964    ) {
1965        self.stack.push(addr.clone());
1966        self.load_intrepr(offset, tag);
1967        let payload_offset = offset
1968            + (self
1969                .bindgen
1970                .sizes()
1971                .payload_offset(tag, cases.clone())
1972                .size_wasm32() as i32);
1973        for ty in cases {
1974            self.push_block();
1975            if let Some(ty) = ty {
1976                self.read_from_memory(ty, addr.clone(), payload_offset);
1977            }
1978            self.finish_block(ty.is_some() as usize);
1979        }
1980    }
1981
1982    fn read_list_from_memory(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
1983        // Read the pointer/length pair and then perform the standard
1984        // lifting process.
1985        self.stack.push(addr.clone());
1986        self.emit(&Instruction::PointerLoad { offset });
1987        self.stack.push(addr);
1988        self.emit(&Instruction::LengthLoad { offset: offset + 4 });
1989        self.lift(ty);
1990    }
1991
1992    fn read_fields_from_memory<'b>(
1993        &mut self,
1994        tys: impl IntoIterator<Item = &'b Type>,
1995        addr: B::Operand,
1996        offset: i32,
1997    ) {
1998        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys).iter() {
1999            let field_offset = field_offset.size_wasm32();
2000            self.read_from_memory(ty, addr.clone(), offset + (field_offset as i32));
2001        }
2002    }
2003
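    /// Helper which pushes `addr`, emits the provided load instruction, and
    /// then lifts the loaded value as `ty`.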
2004    fn emit_and_lift(&mut self, ty: &Type, addr: B::Operand, instr: &Instruction) {
2005        self.stack.push(addr);
2006        self.emit(instr);
2007        self.lift(ty);
2008    }
2009
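    /// Emits the load instruction matching the given integer representation
    /// (used for flags and variant/enum discriminants).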
2010    fn load_intrepr(&mut self, offset: i32, repr: Int) {
2011        self.emit(&match repr {
2012            Int::U64 => Instruction::I64Load { offset },
2013            Int::U32 => Instruction::I32Load { offset },
2014            Int::U16 => Instruction::I32Load16U { offset },
2015            Int::U8 => Instruction::I32Load8U { offset },
2016        });
2017    }
2018
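    /// Emits the store instruction matching the given integer representation,
    /// the counterpart to `load_intrepr`.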
2019    fn store_intrepr(&mut self, offset: i32, repr: Int) {
2020        self.emit(&match repr {
2021            Int::U64 => Instruction::I64Store { offset },
2022            Int::U32 => Instruction::I32Store { offset },
2023            Int::U16 => Instruction::I32Store16 { offset },
2024            Int::U8 => Instruction::I32Store8 { offset },
2025        });
2026    }
2027
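    /// Emits instructions which deallocate any guest-owned allocations (lists
    /// and strings, possibly nested) reachable from the value of type `ty`
    /// stored at `addr` plus `offset`; used when generating post-return
    /// functions.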
2028    fn deallocate(&mut self, ty: &Type, addr: B::Operand, offset: i32) {
2029        use Instruction::*;
2030
2031        // No need to execute any instructions if this type itself doesn't
2032        // require any form of post-return.
2033        if !needs_post_return(self.resolve, ty) {
2034            return;
2035        }
2036
2037        match *ty {
2038            Type::String => {
2039                self.stack.push(addr.clone());
2040                self.emit(&Instruction::PointerLoad { offset });
2041                self.stack.push(addr);
2042                self.emit(&Instruction::LengthLoad { offset: offset + 4 });
2043                self.emit(&Instruction::GuestDeallocateString);
2044            }
2045
2046            Type::Bool
2047            | Type::U8
2048            | Type::S8
2049            | Type::U16
2050            | Type::S16
2051            | Type::U32
2052            | Type::S32
2053            | Type::Char
2054            | Type::U64
2055            | Type::S64
2056            | Type::F32
2057            | Type::F64
2058            | Type::ErrorContext => {}
2059
2060            Type::Id(id) => match &self.resolve.types[id].kind {
2061                TypeDefKind::Type(t) => self.deallocate(t, addr, offset),
2062
2063                TypeDefKind::List(element) => {
2064                    self.stack.push(addr.clone());
2065                    self.emit(&Instruction::PointerLoad { offset });
2066                    self.stack.push(addr);
2067                    self.emit(&Instruction::LengthLoad { offset: offset + 4 });
2068
2069                    self.push_block();
2070                    self.emit(&IterBasePointer);
2071                    let elemaddr = self.stack.pop().unwrap();
2072                    self.deallocate(element, elemaddr, 0);
2073                    self.finish_block(0);
2074
2075                    self.emit(&Instruction::GuestDeallocateList { element });
2076                }
2077
2078                TypeDefKind::Handle(_) => {
2079                    todo!()
2080                }
2081
2082                TypeDefKind::Resource => {
2083                    todo!()
2084                }
2085
2086                TypeDefKind::Record(record) => {
2087                    self.deallocate_fields(
2088                        &record.fields.iter().map(|f| f.ty).collect::<Vec<_>>(),
2089                        addr,
2090                        offset,
2091                    );
2092                }
2093
2094                TypeDefKind::Tuple(tuple) => {
2095                    self.deallocate_fields(&tuple.types, addr, offset);
2096                }
2097
2098                TypeDefKind::Flags(_) => {}
2099
2100                TypeDefKind::Variant(variant) => {
2101                    self.deallocate_variant(
2102                        offset,
2103                        addr,
2104                        variant.tag(),
2105                        variant.cases.iter().map(|c| c.ty.as_ref()),
2106                    );
2107                    self.emit(&GuestDeallocateVariant {
2108                        blocks: variant.cases.len(),
2109                    });
2110                }
2111
2112                TypeDefKind::Option(t) => {
2113                    self.deallocate_variant(offset, addr, Int::U8, [None, Some(t)]);
2114                    self.emit(&GuestDeallocateVariant { blocks: 2 });
2115                }
2116
2117                TypeDefKind::Result(e) => {
2118                    self.deallocate_variant(offset, addr, Int::U8, [e.ok.as_ref(), e.err.as_ref()]);
2119                    self.emit(&GuestDeallocateVariant { blocks: 2 });
2120                }
2121
2122                TypeDefKind::Enum(_) => {}
2123
2124                TypeDefKind::Future(_) => todo!("deallocate future"),
2125                TypeDefKind::Stream(_) => todo!("deallocate stream"),
2126                TypeDefKind::Unknown => unreachable!(),
2127            },
2128        }
2129    }
2130
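    /// Deallocates the payload of a variant-like value: loads the
    /// discriminant and emits one block per case which deallocates that
    /// case's payload, if any.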
2131    fn deallocate_variant<'b>(
2132        &mut self,
2133        offset: i32,
2134        addr: B::Operand,
2135        tag: Int,
2136        cases: impl IntoIterator<Item = Option<&'b Type>> + Clone,
2137    ) {
2138        self.stack.push(addr.clone());
2139        self.load_intrepr(offset, tag);
2140        let payload_offset = offset
2141            + (self
2142                .bindgen
2143                .sizes()
2144                .payload_offset(tag, cases.clone())
2145                .size_wasm32() as i32);
2146        for ty in cases {
2147            self.push_block();
2148            if let Some(ty) = ty {
2149                self.deallocate(ty, addr.clone(), payload_offset);
2150            }
2151            self.finish_block(0);
2152        }
2153    }
2154
2155    fn deallocate_fields(&mut self, tys: &[Type], addr: B::Operand, offset: i32) {
2156        for (field_offset, ty) in self.bindgen.sizes().field_offsets(tys) {
2157            let field_offset = field_offset.size_wasm32();
2158            self.deallocate(ty, addr.clone(), offset + (field_offset as i32));
2159        }
2160    }
2161}
2162
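/// Computes the `Bitcast` required to reinterpret a core wasm value of type
/// `from` as type `to`, used to unify the flat representations of different
/// variant cases.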
2163fn cast(from: WasmType, to: WasmType) -> Bitcast {
2164    use WasmType::*;
2165
2166    match (from, to) {
2167        (I32, I32)
2168        | (I64, I64)
2169        | (F32, F32)
2170        | (F64, F64)
2171        | (Pointer, Pointer)
2172        | (PointerOrI64, PointerOrI64)
2173        | (Length, Length) => Bitcast::None,
2174
2175        (I32, I64) => Bitcast::I32ToI64,
2176        (F32, I32) => Bitcast::F32ToI32,
2177        (F64, I64) => Bitcast::F64ToI64,
2178
2179        (I64, I32) => Bitcast::I64ToI32,
2180        (I32, F32) => Bitcast::I32ToF32,
2181        (I64, F64) => Bitcast::I64ToF64,
2182
2183        (F32, I64) => Bitcast::F32ToI64,
2184        (I64, F32) => Bitcast::I64ToF32,
2185
2186        (I64, PointerOrI64) => Bitcast::I64ToP64,
2187        (Pointer, PointerOrI64) => Bitcast::PToP64,
2188        (_, PointerOrI64) => {
2189            Bitcast::Sequence(Box::new([cast(from, I64), cast(I64, PointerOrI64)]))
2190        }
2191
2192        (PointerOrI64, I64) => Bitcast::P64ToI64,
2193        (PointerOrI64, Pointer) => Bitcast::P64ToP,
2194        (PointerOrI64, _) => Bitcast::Sequence(Box::new([cast(PointerOrI64, I64), cast(I64, to)])),
2195
2196        (I32, Pointer) => Bitcast::I32ToP,
2197        (Pointer, I32) => Bitcast::PToI32,
2198        (I32, Length) => Bitcast::I32ToL,
2199        (Length, I32) => Bitcast::LToI32,
2200        (I64, Length) => Bitcast::I64ToL,
2201        (Length, I64) => Bitcast::LToI64,
2202        (Pointer, Length) => Bitcast::PToL,
2203        (Length, Pointer) => Bitcast::LToP,
2204
2205        (F32, Pointer | Length) => Bitcast::Sequence(Box::new([cast(F32, I32), cast(I32, to)])),
2206        (Pointer | Length, F32) => Bitcast::Sequence(Box::new([cast(from, I32), cast(I32, F32)])),
2207
2208        (F32, F64)
2209        | (F64, F32)
2210        | (F64, I32)
2211        | (I32, F64)
2212        | (Pointer | Length, I64 | F64)
2213        | (I64 | F64, Pointer | Length) => {
2214            unreachable!("Don't know how to bitcast from {:?} to {:?}", from, to);
2215        }
2216    }
2217}
2218
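/// Rounds `val` up to the next multiple of `align`, which must be a power of
/// two (e.g. `align_to(5, 4) == 8`).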
2219fn align_to(val: usize, align: usize) -> usize {
2220    (val + align - 1) & !(align - 1)
2221}