// wasmtime/runtime/vm/traphandlers/backtrace.rs
//! Backtrace and stack walking functionality for Wasm.
//!
//! Walking the Wasm stack consists of
//!
//! 1. identifying sequences of contiguous Wasm frames on the stack
//! (i.e. skipping over native host frames), and
//!
//! 2. walking the Wasm frames within such a sequence.
//!
//! To perform (1) we maintain the entry stack pointer (SP) and exit frame
//! pointer (FP) and program counter (PC) each time we call into Wasm and Wasm
//! calls into the host via trampolines (see
//! `crates/wasmtime/src/runtime/vm/trampolines`). The most recent entry is
//! stored in `VMStoreContext` and older entries are saved in
//! `CallThreadState`. This lets us identify ranges of contiguous Wasm frames on
//! the stack.
//!
//! To solve (2) and walk the Wasm frames within a region of contiguous Wasm
//! frames on the stack, we configure Cranelift's `preserve_frame_pointers =
//! true` setting. Then we can do simple frame pointer traversal starting at the
//! exit FP and stopping once we reach the entry SP (meaning that the next older
//! frame is a host frame).
use crate::prelude::*;
use crate::runtime::store::StoreOpaque;
use crate::runtime::vm::{
traphandlers::{tls, CallThreadState},
Unwind, VMStoreContext,
};
use core::ops::ControlFlow;
/// A WebAssembly stack trace.
///
/// A thin wrapper around the captured [`Frame`]s; iterate them via
/// [`Backtrace::frames`].
#[derive(Debug)]
pub struct Backtrace(Vec<Frame>);
/// A stack frame within a Wasm stack trace.
#[derive(Debug)]
pub struct Frame {
    // Program counter captured for this frame.
    pc: usize,
    // Frame pointer captured for this frame. Its accessor is only compiled
    // with the `gc` feature (see `Frame::fp`), hence the `dead_code`
    // expectation otherwise.
    #[cfg_attr(
        not(feature = "gc"),
        expect(dead_code, reason = "not worth #[cfg] annotations to remove")
    )]
    fp: usize,
}
impl Frame {
/// Get this frame's program counter.
pub fn pc(&self) -> usize {
self.pc
}
/// Get this frame's frame pointer.
#[cfg(feature = "gc")]
pub fn fp(&self) -> usize {
self.fp
}
}
impl Backtrace {
    /// Returns an empty backtrace
    pub fn empty() -> Backtrace {
        Backtrace(Vec::new())
    }

    /// Capture the current Wasm stack in a backtrace.
    ///
    /// Returns an empty backtrace when there is no `CallThreadState` in TLS,
    /// i.e. no Wasm activation on this thread.
    pub fn new(store: &StoreOpaque) -> Backtrace {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                Self::new_with_trap_state(vm_store_context, unwind, state, None)
            },
            None => Backtrace(vec![]),
        })
    }

    /// Capture the current Wasm stack trace.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    ///
    /// # Safety
    ///
    /// `vm_store_context` must be a valid pointer to the `VMStoreContext`
    /// associated with `state` (this is asserted when `trap_pc_and_fp` is
    /// provided), and its recorded entry/exit PC/FP values must describe the
    /// current thread's stack.
    pub(crate) unsafe fn new_with_trap_state(
        vm_store_context: *const VMStoreContext,
        unwind: &dyn Unwind,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
    ) -> Backtrace {
        let mut frames = vec![];
        Self::trace_with_trap_state(vm_store_context, unwind, state, trap_pc_and_fp, |frame| {
            frames.push(frame);
            ControlFlow::Continue(())
        });
        Backtrace(frames)
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// Does nothing when no Wasm is running on this thread.
    #[cfg(feature = "gc")]
    pub fn trace(store: &StoreOpaque, f: impl FnMut(Frame) -> ControlFlow<()>) {
        let vm_store_context = store.vm_store_context();
        let unwind = store.unwinder();
        tls::with(|state| match state {
            Some(state) => unsafe {
                Self::trace_with_trap_state(vm_store_context, unwind, state, None, f)
            },
            None => {}
        });
    }

    /// Walk the current Wasm stack, calling `f` for each frame we walk.
    ///
    /// If Wasm hit a trap, and we are calling this from the trap handler, then
    /// the Wasm exit trampoline didn't run, and we use the provided PC and FP
    /// instead of looking them up in `VMStoreContext`.
    ///
    /// # Safety
    ///
    /// Same contract as [`Backtrace::new_with_trap_state`]: `vm_store_context`
    /// must be valid and must describe this thread's current activations.
    pub(crate) unsafe fn trace_with_trap_state(
        vm_store_context: *const VMStoreContext,
        unwind: &dyn Unwind,
        state: &CallThreadState,
        trap_pc_and_fp: Option<(usize, usize)>,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) {
        log::trace!("====== Capturing Backtrace ======");
        let (last_wasm_exit_pc, last_wasm_exit_fp) = match trap_pc_and_fp {
            // If we exited Wasm by catching a trap, then the Wasm-to-host
            // trampoline did not get a chance to save the last Wasm PC and FP,
            // and we need to use the plumbed-through values instead.
            Some((pc, fp)) => {
                assert!(core::ptr::eq(
                    vm_store_context,
                    state.vm_store_context.as_ptr()
                ));
                (pc, fp)
            }
            // Either there is no Wasm currently on the stack, or we exited Wasm
            // through the Wasm-to-host trampoline.
            None => {
                let pc = *(*vm_store_context).last_wasm_exit_pc.get();
                let fp = *(*vm_store_context).last_wasm_exit_fp.get();
                (pc, fp)
            }
        };
        // Build the list of contiguous-Wasm-frame regions ("activations") to
        // walk: the most recent one comes from `VMStoreContext` (or the trap
        // values above), and older ones were saved off in the
        // `CallThreadState` chain. Entries belonging to other stores are
        // filtered out, and a zeroed PC marks the end of the list.
        let activations = core::iter::once((
            last_wasm_exit_pc,
            last_wasm_exit_fp,
            *(*vm_store_context).last_wasm_entry_fp.get(),
        ))
        .chain(
            state
                .iter()
                .filter(|state| core::ptr::eq(vm_store_context, state.vm_store_context.as_ptr()))
                .map(|state| {
                    (
                        state.old_last_wasm_exit_pc(),
                        state.old_last_wasm_exit_fp(),
                        state.old_last_wasm_entry_fp(),
                    )
                }),
        )
        .take_while(|&(pc, fp, sp)| {
            if pc == 0 {
                debug_assert_eq!(fp, 0);
                debug_assert_eq!(sp, 0);
            }
            pc != 0
        });
        for (pc, fp, sp) in activations {
            if let ControlFlow::Break(()) = Self::trace_through_wasm(unwind, pc, fp, sp, &mut f) {
                log::trace!("====== Done Capturing Backtrace (closure break) ======");
                return;
            }
        }
        log::trace!("====== Done Capturing Backtrace (reached end of activations) ======");
    }

    /// Walk through a contiguous sequence of Wasm frames starting with the
    /// frame at the given PC and FP and ending at `trampoline_fp`.
    unsafe fn trace_through_wasm(
        unwind: &dyn Unwind,
        mut pc: usize,
        mut fp: usize,
        trampoline_fp: usize,
        mut f: impl FnMut(Frame) -> ControlFlow<()>,
    ) -> ControlFlow<()> {
        log::trace!("=== Tracing through contiguous sequence of Wasm frames ===");
        log::trace!("trampoline_fp = 0x{:016x}", trampoline_fp);
        log::trace!("   initial pc = 0x{:016x}", pc);
        log::trace!("   initial fp = 0x{:016x}", fp);
        // We already checked for this case in the `trace_with_trap_state`
        // caller.
        assert_ne!(pc, 0);
        assert_ne!(fp, 0);
        assert_ne!(trampoline_fp, 0);
        // This loop will walk the linked list of frame pointers starting at
        // `fp` and going up until `trampoline_fp`. We know that both `fp` and
        // `trampoline_fp` are "trusted values" aka generated and maintained by
        // Cranelift. This means that it should be safe to walk the linked list
        // of pointers and inspect wasm frames.
        //
        // Note, though, that any frames outside of this range are not
        // guaranteed to have valid frame pointers. For example native code
        // might be using the frame pointer as a general purpose register. Thus
        // we need to be careful to only walk frame pointers in this one
        // contiguous linked list.
        //
        // To know when to stop iteration all architectures' stacks currently
        // look something like this:
        //
        //     | ...               |
        //     | Native Frames     |
        //     | ...               |
        //     |-------------------|
        //     | ...               | <-- Trampoline FP            |
        //     | Trampoline Frame  |                              |
        //     | ...               | <-- Trampoline SP            |
        //     |-------------------|                            Stack
        //     | Return Address    |                            Grows
        //     | Previous FP       | <-- Wasm FP                Down
        //     | ...               |                              |
        //     | Wasm Frames       |                              |
        //     | ...               |                              V
        //
        // The trampoline records its own frame pointer (`trampoline_fp`),
        // which is guaranteed to be above all Wasm. To check when we've
        // reached the trampoline frame, it is therefore sufficient to
        // check when the next frame pointer is equal to `trampoline_fp`. Once
        // that's hit then we know that the entire linked list has been
        // traversed.
        //
        // Note that it might be possible that this loop doesn't execute at all.
        // For example if the entry trampoline called wasm which `return_call`'d
        // an imported function which is an exit trampoline, then
        // `fp == trampoline_fp` on the entry of this function, meaning the loop
        // won't actually execute anything.
        while fp != trampoline_fp {
            // At the start of each iteration of the loop, we know that `fp` is
            // a frame pointer from Wasm code. Therefore, we know it is not
            // being used as an extra general-purpose register, and it is safe
            // to dereference it to get the PC and the next older frame pointer.
            //
            // The stack also grows down, and therefore any frame pointer we are
            // dealing with should be less than the frame pointer on entry to
            // Wasm. Finally also assert that it's aligned correctly as an
            // additional sanity check.
            assert!(trampoline_fp > fp, "{trampoline_fp:#x} > {fp:#x}");
            unwind.assert_fp_is_aligned(fp);
            log::trace!("--- Tracing through one Wasm frame ---");
            log::trace!("pc = {:p}", pc as *const ());
            log::trace!("fp = {:p}", fp as *const ());
            f(Frame { pc, fp })?;
            pc = unwind.get_next_older_pc_from_fp(fp);
            // We rely on this offset being zero for all supported architectures
            // in `crates/cranelift/src/component/compiler.rs` when we set the
            // Wasm exit FP. If this ever changes, we will need to update that
            // code as well!
            assert_eq!(unwind.next_older_fp_from_fp_offset(), 0);
            // Get the next older frame pointer from the current Wasm frame
            // pointer.
            let next_older_fp = *(fp as *mut usize).add(unwind.next_older_fp_from_fp_offset());
            // Because the stack always grows down, the older FP must be greater
            // than the current FP.
            assert!(next_older_fp > fp, "{next_older_fp:#x} > {fp:#x}");
            fp = next_older_fp;
        }
        log::trace!("=== Done tracing contiguous sequence of Wasm frames ===");
        ControlFlow::Continue(())
    }

    /// Iterate over the frames inside this backtrace.
    ///
    /// Frames are yielded in capture order: the innermost (most recently
    /// executing) Wasm frame first, oldest frame last.
    pub fn frames<'a>(
        &'a self,
    ) -> impl ExactSizeIterator<Item = &'a Frame> + DoubleEndedIterator + 'a {
        self.0.iter()
    }
}