//! Backtrace and stack walking functionality for Wasm.
//!
//! Walking the Wasm stack consists of
//!
//! 1. identifying sequences of contiguous Wasm frames on the stack
//!    (i.e. skipping over native host frames), and
//!
//! 2. walking the Wasm frames within such a sequence.
//!
//! To perform (1) we maintain the entry stack pointer (SP) and exit frame
//! pointer (FP) and program counter (PC) each time we call into Wasm and Wasm
//! calls into the host via trampolines (see
//! `crates/runtime/src/trampolines`). The most recent entry is stored in
//! `VMRuntimeLimits` and older entries are saved in `CallThreadState`. This
//! lets us identify ranges of contiguous Wasm frames on the stack.
//!
//! To solve (2) and walk the Wasm frames within a region of contiguous Wasm
//! frames on the stack, we configure Cranelift's `preserve_frame_pointers =
//! true` setting. Then we can do simple frame pointer traversal starting at the
//! exit FP and stopping once we reach the entry SP (meaning that the next older
//! frame is a host frame).
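//!
//! In miniature, the walk within one contiguous Wasm region looks roughly like
//! this (a sketch only; the real loop, with all of its assertions and
//! architecture-specific hooks, is `Backtrace::trace_through_wasm` below):
//!
//! ```ignore
//! let mut pc = last_wasm_exit_pc;
//! let mut fp = last_wasm_exit_fp;
//! loop {
//!     visit_frame(pc, fp);
//!     pc = arch::get_next_older_pc_from_fp(fp);
//!     let next_older_fp = *(fp as *const usize);
//!     if arch::reached_entry_sp(next_older_fp, last_wasm_entry_sp) {
//!         break; // The next older frame is the host-to-Wasm trampoline.
//!     }
//!     fp = next_older_fp;
//! }
//! ```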

use crate::{
    traphandlers::{tls, CallThreadState},
    VMRuntimeLimits,
};
use cfg_if::cfg_if;
use std::ops::ControlFlow;

// Architecture-specific bits for stack walking. Each of these modules should
// define and export the following items:
//
// * `unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize`
// * `const NEXT_OLDER_FP_FROM_FP_OFFSET: usize`
// * `fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool`
// * `fn assert_entry_sp_is_aligned(sp: usize)`
// * `fn assert_fp_is_aligned(fp: usize)`
//
// (A rough sketch of these hooks follows the `cfg_if!` dispatch below.)
cfg_if! {
    if #[cfg(target_arch = "x86_64")] {
        mod x86_64;
        use x86_64 as arch;
    } else if #[cfg(target_arch = "aarch64")] {
        mod aarch64;
        use aarch64 as arch;
    } else if #[cfg(target_arch = "s390x")] {
        mod s390x;
        use s390x as arch;
    } else if #[cfg(target_arch = "riscv64")] {
        mod riscv64;
        use riscv64 as arch;
    } else {
        compile_error!("unsupported architecture");
    }
}
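
// For orientation, on x86-64 these hooks boil down to roughly the following (a
// sketch only; the real definitions live in the `x86_64` module above, and
// other architectures differ in their details):
//
//     // The saved return address sits one word above the saved frame pointer.
//     pub unsafe fn get_next_older_pc_from_fp(fp: usize) -> usize {
//         *(fp as *const usize).add(1)
//     }
//
//     // The next older frame pointer is saved at offset 0 from the current FP.
//     pub const NEXT_OLDER_FP_FROM_FP_OFFSET: usize = 0;
//
//     // We have left the contiguous Wasm region once the next older FP is at
//     // or above the SP recorded by the host-to-Wasm trampoline on entry.
//     pub fn reached_entry_sp(fp: usize, first_wasm_sp: usize) -> bool {
//         fp >= first_wasm_sp
//     }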

/// A WebAssembly stack trace.
#[derive(Debug)]
pub struct Backtrace(Vec<Frame>);

/// A stack frame within a Wasm stack trace.
#[derive(Debug)]
pub struct Frame {
    pc: usize,
    fp: usize,
}

impl Frame {
    /// Get this frame's program counter.
    pub fn pc(&self) -> usize {
        self.pc
    }

    /// Get this frame's frame pointer.
    pub fn fp(&self) -> usize {
        self.fp
    }
}

impl Backtrace {
    /// Returns an empty backtrace
    pub fn empty() -> Backtrace {
        Backtrace(Vec::new())
    }

    /// Capture the current Wasm stack in a backtrace.
    pub fn new(limits: *const VMRuntimeLimits) -> Backtrace {
        tls::with(|state| match state {
            Some(state) => unsafe { Self::new_with_trap_state(limits, state, None) },
            None => Backtrace(vec![]),
        })
    }

    /// Capture the current Wasm stack trace.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMRuntimeLimits`.
    pub(crate) unsafe fn new_with_trap_state(
        limits: *const VMRuntimeLimits,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
    ) -> Backtrace {
        let mut frames = vec![];
        Self::trace_with_trap_state(limits, state, trap_pc_and_fp, |frame| {
            frames.push(frame);
            ControlFlow::Continue(())
        });
        Backtrace(frames)
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
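    ///
    /// For example, a caller that already has a `*const VMRuntimeLimits` in
    /// hand (a sketch only; `limits` is assumed to come from the store's
    /// runtime internals) could collect the youngest few frame PCs like so:
    ///
    /// ```ignore
    /// let mut pcs = vec![];
    /// Backtrace::trace(limits, |frame| {
    ///     pcs.push(frame.pc());
    ///     if pcs.len() < 3 {
    ///         std::ops::ControlFlow::Continue(())
    ///     } else {
    ///         std::ops::ControlFlow::Break(())
    ///     }
    /// });
    /// ```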
    pub fn trace(limits: *const VMRuntimeLimits, f: impl FnMut(Frame) -> ControlFlow<()>) {
        tls::with(|state| match state {
            Some(state) => unsafe { Self::trace_with_trap_state(limits, state, None, f) },
            None => {}
        });
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMRuntimeLimits`.
    pub(crate) unsafe fn trace_with_trap_state(
        limits: *const VMRuntimeLimits,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace ======");

        let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
            // If we exited Wasm by catching a trap, then the Wasm-to-host
            // trampoline did not get a chance to save the last Wasm PC and FP,
            // and we need to use the plumbed-through values instead.
            Some((pc, fp)) => {
                assert!(std::ptr::eq(limits, state.limits));
                (pc, fp)
            }
            // Either there is no Wasm currently on the stack, or we exited Wasm
            // through the Wasm-to-host trampoline.
            None => {
                let pc = *(*limits).last_wasm_exit_pc.get();
                let fp = *(*limits).last_wasm_exit_fp.get();
                (pc, fp)
            }
        };

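        // Build an iterator over the contiguous Wasm regions ("activations")
        // on this thread's stack, youngest first. Each item is the
        // `(exit pc, exit fp, entry sp)` triple delimiting one region: the most
        // recent values come from `VMRuntimeLimits`, and older ones come from
        // the values saved in the `CallThreadState` chain (entries associated
        // with a different `VMRuntimeLimits`, i.e. a different store, are
        // skipped). A zero PC is the sentinel meaning there are no more Wasm
        // activations, so we stop there.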
        let activations = std::iter::once((
            last_wasm_exit_pc,
            last_wasm_exit_fp,
            *(*limits).last_wasm_entry_sp.get(),
        ))
        .chain(
            state
                .iter()
                .filter(|state| std::ptr::eq(limits, state.limits))
                .map(|state| {
                    (
                        state.old_last_wasm_exit_pc(),
                        state.old_last_wasm_exit_fp(),
                        state.old_last_wasm_entry_sp(),
                    )
                }),
        )
        .take_while(|&(pc, fp, sp)| {
            if pc == 0 {
                debug_assert_eq!(fp, 0);
                debug_assert_eq!(sp, 0);
            }
            pc != 0
        });

        for (pc, fp, sp) in activations {
            if let ControlFlow::Break(()) = Self::trace_through_wasm(pc, fp, sp, &mut f) {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }

        log::trace!("====== Done Capturing Backtrace (reached end of activations) ======");
    }

    /// Walk through a contiguous sequence of Wasm frames starting with the
    /// frame at the given PC and FP and ending at `trampoline_sp`.
    unsafe fn trace_through_wasm(
        mut pc: usize,
        mut fp: usize,
        trampoline_sp: usize,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) -> ControlFlow<()> {
        log::trace!("=== Tracing through contiguous sequence of Wasm frames ===");
        log::trace!("trampoline_sp = 0x{:016x}", trampoline_sp);
        log::trace!("   initial pc = 0x{:016x}", pc);
        log::trace!("   initial fp = 0x{:016x}", fp);

        // We already checked for this case in the `trace_with_trap_state`
        // caller.
        assert_ne!(pc, 0);
        assert_ne!(fp, 0);
        assert_ne!(trampoline_sp, 0);

        arch::assert_entry_sp_is_aligned(trampoline_sp);

        loop {
            // At the start of each iteration of the loop, we know that `fp` is
            // a frame pointer from Wasm code. Therefore, we know it is not
            // being used as an extra general-purpose register, and it is safe
            // to dereference it to get the PC and the next older frame pointer.

            // The stack grows down, and therefore any frame pointer we are
            // dealing with should be less than the stack pointer on entry
            // to Wasm.
            assert!(trampoline_sp >= fp, "{trampoline_sp:#x} >= {fp:#x}");

            arch::assert_fp_is_aligned(fp);

            log::trace!("--- Tracing through one Wasm frame ---");
            log::trace!("pc = {:p}", pc as *const ());
            log::trace!("fp = {:p}", fp as *const ());

            f(Frame { pc, fp })?;

            pc = arch::get_next_older_pc_from_fp(fp);

            // We rely on this offset being zero for all supported architectures
            // in `crates/cranelift/src/component/compiler.rs` when we set the
            // Wasm exit FP. If this ever changes, we will need to update that
            // code as well!
            assert_eq!(arch::NEXT_OLDER_FP_FROM_FP_OFFSET, 0);

            // Get the next older frame pointer from the current Wasm frame
            // pointer.
            //
            // The next older frame pointer may or may not be a Wasm frame's
            // frame pointer, but it is trusted either way (i.e. is actually a
            // frame pointer and not being used as a general-purpose register)
            // because we always enter Wasm from the host via a trampoline, and
            // this trampoline maintains a proper frame pointer.
            //
            // We want to detect when we've reached the trampoline, and break
            // out of this stack-walking loop. All of our architectures' stacks
            // grow down and look something vaguely like this:
            //
            //     | ...               |
            //     | Native Frames     |
            //     | ...               |
            //     |-------------------|
            //     | ...               | <-- Trampoline FP            |
            //     | Trampoline Frame  |                              |
            //     | ...               | <-- Trampoline SP            |
            //     |-------------------|                            Stack
            //     | Return Address    |                            Grows
            //     | Previous FP       | <-- Wasm FP                Down
            //     | ...               |                              |
            //     | Wasm Frames       |                              |
            //     | ...               |                              V
            //
            // The trampoline records its own stack pointer (`trampoline_sp`),
            // which is guaranteed to be above all Wasm frame pointers but at or
            // below its own frame pointer. It is usually two words above the
            // Wasm frame pointer (at least on x86-64, exact details vary across
            // architectures) but not always: if the first Wasm function called
            // by the host has many arguments, some of them could be passed on
            // the stack in between the return address and the trampoline's
            // frame.
            //
            // To check when we've reached the trampoline frame, it is therefore
            // sufficient to check when the next frame pointer is greater than
            // or equal to `trampoline_sp` (except s390x, where it needs to be
            // strictly greater than).
            let next_older_fp = *(fp as *mut usize).add(arch::NEXT_OLDER_FP_FROM_FP_OFFSET);
            if arch::reached_entry_sp(next_older_fp, trampoline_sp) {
                log::trace!("=== Done tracing contiguous sequence of Wasm frames ===");
                return ControlFlow::Continue(());
            }

            // Because the stack always grows down, the older FP must be greater
            // than the current FP.
            assert!(next_older_fp > fp, "{next_older_fp:#x} > {fp:#x}");
            fp = next_older_fp;
        }
    }

    /// Iterate over the frames inside this backtrace.
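    ///
    /// Frames are yielded youngest (most recently called) first. For example
    /// (a sketch only, assuming `backtrace` was captured as above):
    ///
    /// ```ignore
    /// for frame in backtrace.frames() {
    ///     println!("wasm frame: pc = {:#x}", frame.pc());
    /// }
    /// ```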
    pub fn frames<'a>(
        &'a self,
    ) -> impl ExactSizeIterator<Item = &'a Frame> + DoubleEndedIterator + 'a {
        self.0.iter()
    }
}