wasmtime/runtime/vm/stack_switching.rs
//! This module contains the runtime components of the implementation of the
//! stack switching proposal.

mod stack;

use core::{marker::PhantomPinned, ptr::NonNull};

pub use stack::*;

/// A continuation object is a handle to a continuation reference
/// (i.e. an actual stack). A continuation object can only be consumed
/// once. The linearity is checked dynamically in the generated code
/// by comparing the revision witness embedded in the pointer to the
/// actual revision counter on the continuation reference.
///
/// In the optimized implementation, the continuation logically
/// represented by a VMContObj not only encompasses the pointed-to
/// VMContRef, but also all of its parents:
///
/// ```text
///
///                    +----------------+
///                +-->|   VMContRef    |
///                |   +----------------+
///                |            ^
///                |            | parent
///                |            |
///                |   +----------------+
///                |   |   VMContRef    |
///                |   +----------------+
///                |            ^
///                |            | parent
/// last ancestor  |            |
///                |   +----------------+
///                +---|   VMContRef    | <-- VMContObj
///                    +----------------+
/// ```
///
/// For performance reasons, the VMContRef at the bottom of this chain
/// (i.e., the one pointed to by the VMContObj) has a pointer to the
/// other end of the chain (i.e., its last ancestor).
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct VMContObj {
    pub contref: NonNull<VMContRef>,
    pub revision: usize,
}

impl VMContObj {
    pub fn new(contref: NonNull<VMContRef>, revision: usize) -> Self {
        Self { contref, revision }
    }

    /// Constructs a `VMContObj` from a pointer and revision.
    ///
    /// The `contref` pointer may be null, in which case `None` is returned.
    ///
    /// # Safety
    ///
    /// Behavior will be undefined if a pointer to data that is not a
    /// VMContRef is provided.
    pub unsafe fn from_raw_parts(contref: *mut u8, revision: usize) -> Option<Self> {
        NonNull::new(contref.cast::<VMContRef>()).map(|contref| Self::new(contref, revision))
    }
}
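
// Illustrative sketch (not part of the runtime): the generated code is expected to
// enforce linearity roughly like the hypothetical helper below, by comparing the
// revision witness carried in the `VMContObj` against the live counter on the
// `VMContRef`. A mismatch means the handle was already consumed.
//
//     fn check_linearity(obj: VMContObj) -> Result<NonNull<VMContRef>, ()> {
//         // SAFETY (assumed for this sketch): `obj.contref` points to a live VMContRef.
//         let current = unsafe { obj.contref.as_ref().revision };
//         if current == obj.revision {
//             // First use; the consumer then bumps `revision`, invalidating stale copies.
//             Ok(obj.contref)
//         } else {
//             // Stale witness: the continuation was already consumed.
//             Err(())
//         }
//     }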

unsafe impl Send for VMContObj {}
unsafe impl Sync for VMContObj {}

/// This type is used to save (and subsequently restore) a subset of the data in
/// `VMStoreContext`. See documentation of `VMStackChain` for the exact uses.
#[repr(C)]
#[derive(Debug, Default, Clone)]
pub struct VMStackLimits {
    /// Saved version of `stack_limit` field of `VMStoreContext`
    pub stack_limit: usize,
    /// Saved version of `last_wasm_entry_fp` field of `VMStoreContext`
    pub last_wasm_entry_fp: usize,
}

/// This type represents "common" information that we need to save both for the
/// initial stack and each continuation.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct VMCommonStackInformation {
    /// Saves subset of `VMStoreContext` for this stack. See documentation of
    /// `VMStackChain` for the exact uses.
    pub limits: VMStackLimits,
    /// For the initial stack, this field must only have one of the following values:
    /// - Running
    /// - Parent
    pub state: VMStackState,

    /// Only in use when state is `Parent`. Otherwise, the list must be empty.
    ///
    /// Represents the handlers that this stack installed when resume-ing a
    /// continuation.
    ///
    /// Note that for any resume instruction, we can re-order the handler
    /// clauses without changing behavior such that all the suspend handlers
    /// come first, followed by all the switch handlers (while maintaining the
    /// original ordering within the two groups).
    /// Thus, we assume that the given resume instruction has the following
    /// shape:
    ///
    /// (resume $ct
    ///   (on $tag_0 $block_0) ... (on $tag_{n-1} $block_{n-1})
    ///   (on $tag_n switch) ... (on $tag_m switch)
    /// )
    ///
    /// On resume, the handler list is then filled with m + 1 (i.e., one per
    /// handler clause) entries such that the i-th entry, using 0-based
    /// indexing, is the identifier of $tag_i (represented as *mut
    /// VMTagDefinition).
    /// Further, `first_switch_handler_index` (see below) is set to n (i.e., the
    /// 0-based index of the first switch handler). A sketch of the resulting
    /// layout follows this struct.
    ///
    /// Note that the actual data buffer (i.e., the one `handler.data` points
    /// to) is always allocated on the stack that this `CommonStackInformation`
    /// struct describes.
    pub handlers: VMHandlerList,

    /// Only used when state is `Parent`. See documentation of `handlers` above.
    pub first_switch_handler_index: u32,
}
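
// Illustrative sketch (an assumption, not the actual `resume` implementation): for a
// resume instruction of the shape documented for `handlers` above, the handler list
// ends up laid out with all suspend handlers first, then all switch handlers, and
// `first_switch_handler_index` marking the boundary between the two groups.
//
//     fn install_handlers(
//         csi: &mut VMCommonStackInformation,
//         suspend_tags: &[*mut u8], // $tag_0 ..= $tag_{n-1}, really *mut VMTagDefinition
//         switch_tags: &[*mut u8],  // $tag_n ..= $tag_m, really *mut VMTagDefinition
//     ) {
//         // Assumes `csi.handlers.data` already points at a buffer on this stack with
//         // capacity for all entries; this sketch only describes the resulting layout.
//         for (i, tag) in suspend_tags.iter().chain(switch_tags.iter()).enumerate() {
//             unsafe { *csi.handlers.data.add(i) = *tag };
//         }
//         csi.handlers.length = (suspend_tags.len() + switch_tags.len()) as u32;
//         csi.first_switch_handler_index = suspend_tags.len() as u32;
//         csi.state = VMStackState::Parent;
//     }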

impl VMCommonStackInformation {
    /// Default value with state set to `Running`
    pub fn running_default() -> Self {
        Self {
            limits: VMStackLimits::default(),
            state: VMStackState::Running,
            handlers: VMHandlerList::empty(),
            first_switch_handler_index: 0,
        }
    }
}

impl VMStackLimits {
    /// Default value, but uses the given value for `stack_limit`.
    pub fn with_stack_limit(stack_limit: usize) -> Self {
        Self {
            stack_limit,
            ..Default::default()
        }
    }
}

#[repr(C)]
#[derive(Debug, Clone)]
/// Reference to a stack-allocated buffer ("array"), storing data of some type
/// `T`.
pub struct VMHostArray<T> {
    /// Number of currently occupied slots.
    pub length: u32,
    /// Number of slots in the data buffer. Note that this is *not* the size of
    /// the buffer in bytes!
    pub capacity: u32,
    /// The actual data buffer
    pub data: *mut T,
}

impl<T> VMHostArray<T> {
    /// Creates an empty `VMHostArray`.
    pub fn empty() -> Self {
        Self {
            length: 0,
            capacity: 0,
            data: core::ptr::null_mut(),
        }
    }

    /// Empties the `VMHostArray`.
    pub fn clear(&mut self) {
        *self = Self::empty();
    }
}
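
// Illustrative sketch (not used by the runtime): host code that wants to inspect the
// occupied slots of a `VMHostArray<T>` can view them as a slice, assuming `data` is
// live, properly aligned for `T`, and holds at least `length` initialized elements.
//
//     unsafe fn occupied_slots<T>(array: &VMHostArray<T>) -> &[T] {
//         if array.data.is_null() {
//             &[]
//         } else {
//             core::slice::from_raw_parts(array.data, array.length as usize)
//         }
//     }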

/// Type used for passing payloads to and from continuations. The actual type
/// argument should be wasmtime::runtime::vm::vmcontext::ValRaw, but we don't
/// have access to that here.
pub type VMPayloads = VMHostArray<u128>;

/// Type for a list of handlers, represented by the handled tag. Thus, the
/// stored data is actually `*mut VMTagDefinition`, but we don't have access to
/// that here.
pub type VMHandlerList = VMHostArray<*mut u8>;

/// The main type representing a continuation.
#[repr(C)]
pub struct VMContRef {
    /// The `CommonStackInformation` of this continuation's stack.
    pub common_stack_information: VMCommonStackInformation,

    /// The parent of this continuation, which may be another continuation, the
    /// initial stack, or absent (in case of a suspended continuation).
    pub parent_chain: VMStackChain,

    /// Only used if `common_stack_information.state` is `Suspended` or `Fresh`. In
    /// that case, this points to the end of the stack chain (i.e., the
    /// continuation in the parent chain whose own `parent_chain` field is
    /// `VMStackChain::Absent`).
    /// Note that this may be a pointer to itself (if the state is `Fresh`, this is always the case).
    pub last_ancestor: *mut VMContRef,

    /// Revision counter.
    pub revision: usize,

    /// The underlying stack.
    pub stack: VMContinuationStack,

    /// Used to store only
    /// 1. The arguments to the function passed to cont.new
    /// 2. The return values of that function
    ///
    /// Note that the actual data buffer (i.e., the one `args.data` points
    /// to) is always allocated on this continuation's stack.
    pub args: VMPayloads,

    /// Once a continuation has been suspended (using suspend or switch),
    /// this buffer is used to pass payloads to and from the continuation.
    /// More concretely, it is used to
    /// - Pass payloads from a suspend instruction to the corresponding handler.
    /// - Pass payloads to a continuation using cont.bind or resume
    /// - Pass payloads to the continuation being switched to when using switch.
    ///
    /// Note that the actual data buffer (i.e., the one `values.data` points
    /// to) is always allocated on this continuation's stack.
    pub values: VMPayloads,

    /// Tell the compiler that this structure has potential self-references
    /// through the `last_ancestor` pointer.
    _marker: core::marker::PhantomPinned,
}

impl VMContRef {
    pub fn fiber_stack(&self) -> &VMContinuationStack {
        &self.stack
    }

    pub fn detach_stack(&mut self) -> VMContinuationStack {
        core::mem::replace(&mut self.stack, VMContinuationStack::unallocated())
    }

    /// This is effectively a `Default` implementation, without naming it as
    /// such. Used to create `VMContRef`s when initializing the pooling allocator.
    pub fn empty() -> Self {
        let limits = VMStackLimits::with_stack_limit(Default::default());
        let state = VMStackState::Fresh;
        let handlers = VMHandlerList::empty();
        let common_stack_information = VMCommonStackInformation {
            limits,
            state,
            handlers,
            first_switch_handler_index: 0,
        };
        let parent_chain = VMStackChain::Absent;
        let last_ancestor = core::ptr::null_mut();
        let stack = VMContinuationStack::unallocated();
        let args = VMPayloads::empty();
        let values = VMPayloads::empty();
        let revision = 0;
        let _marker = PhantomPinned;

        Self {
            common_stack_information,
            parent_chain,
            last_ancestor,
            stack,
            args,
            values,
            revision,
            _marker,
        }
    }
}
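
// Illustrative sketch (an assumption about how a pool might use the API above, not
// the actual pooling allocator): a slot starts out as `VMContRef::empty()`, and its
// stack is detached before the slot's memory is recycled.
//
//     fn recycle(slot: &mut VMContRef) -> VMContinuationStack {
//         // Take the stack out of the slot so it can be deallocated or reused...
//         let stack = slot.detach_stack();
//         // ...and reset the slot itself to a blank, unallocated state.
//         *slot = VMContRef::empty();
//         stack
//     }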

impl Drop for VMContRef {
    fn drop(&mut self) {
        // Note that continuation references do not own their parents, and we
        // don't drop them here.

        // We would like to enforce the invariant that any continuation that
        // was created for a cont.new (rather than, say, just living in a
        // pool and never being touched), either ran to completion or was
        // cancelled. But failing to do so should yield a custom error,
        // instead of panicking here.
    }
}

// These are required so the WasmFX pooling allocator can store a Vec of
// `VMContRef`s.
unsafe impl Send for VMContRef {}
unsafe impl Sync for VMContRef {}

/// Implements `cont.new` instructions (i.e., creation of continuations).
#[cfg(feature = "stack-switching")]
#[inline(always)]
pub fn cont_new(
    store: &mut dyn crate::vm::VMStore,
    instance: crate::store::InstanceId,
    func: *mut u8,
    param_count: u32,
    result_count: u32,
) -> anyhow::Result<*mut VMContRef> {
    let instance = store.instance_mut(instance);
    let caller_vmctx = instance.vmctx();

    let stack_size = store.engine().config().async_stack_size;

    let contref = store.allocate_continuation()?;
    let contref = unsafe { contref.as_mut().unwrap() };

    let tsp = contref.stack.top().unwrap();
    contref.parent_chain = VMStackChain::Absent;
    // The continuation is fresh, which is a special case of being suspended.
    // Thus we need to set the correct end of the continuation chain: itself.
    contref.last_ancestor = contref;

    // The initialization function will allocate the actual args/return value buffer and
    // update this object (if needed).
    let contref_args_ptr = &mut contref.args as *mut _ as *mut VMHostArray<crate::ValRaw>;

    contref.stack.initialize(
        func.cast::<crate::vm::VMFuncRef>(),
        caller_vmctx.as_ptr(),
        contref_args_ptr,
        param_count,
        result_count,
    );

    // Now that the initial stack pointer was set by the initialization
    // function, use it to determine the stack limit.
    let stack_pointer = contref.stack.control_context_stack_pointer();
    // Same caveat regarding stack_limit here as described in
    // `wasmtime::runtime::func::EntryStoreContext::enter_wasm`.
    let wasm_stack_limit = core::cmp::max(
        stack_pointer - store.engine().config().max_wasm_stack,
        tsp as usize - stack_size,
    );
    let limits = VMStackLimits::with_stack_limit(wasm_stack_limit);
    let csi = &mut contref.common_stack_information;
    csi.state = VMStackState::Fresh;
    csi.limits = limits;

    log::trace!("Created contref @ {contref:p}");
    Ok(contref)
}

/// This type represents a linked list ("chain") of stacks, where a
/// node's successor denotes its parent.
/// Additionally, a `CommonStackInformation` object is associated with
/// each stack in the list.
/// Here, a "stack" is one of the following:
/// - A continuation (i.e., created with cont.new).
/// - The initial stack. This is the stack that we were on when entering
///   Wasm (i.e., when executing
///   `crate::runtime::func::invoke_wasm_and_catch_traps`).
///   This stack never has a parent.
///   In terms of the memory allocation that this stack resides on, it will
///   usually be the main stack, but doesn't have to: If we are running
///   inside a continuation while executing a host call, which in turn
///   re-enters Wasm, the initial stack is actually the stack of that
///   continuation.
///
/// Note that the linked list character of `VMStackChain` arises from the fact
/// that `VMStackChain::Continuation` variants have a pointer to a
/// `VMContRef`, which in turn has a `parent_chain` value of type
/// `VMStackChain`. This is how the stack chain reflects the parent-child
/// relationships between continuations/stacks. This also shows how the
/// initial stack (mentioned above) cannot have a parent.
///
/// There are generally two uses of `VMStackChain`:
///
/// 1. The `stack_chain` field in the `StoreOpaque` contains such a
///    chain of stacks, where the head of the list denotes the stack that is
///    currently executing (either a continuation or the initial stack). Note
///    that in this case, the linked list must contain 0 or more `Continuation`
///    elements, followed by a final `InitialStack` element. In particular,
///    this list always ends with `InitialStack` and never contains an `Absent`
///    variant.
///
/// 2. When a continuation is suspended, its chain of parents eventually
///    ends with an `Absent` variant in its `parent_chain` field. Note that a
///    suspended continuation never appears in the stack chain in the
///    VMContext!
///
/// As mentioned before, each stack in a `VMStackChain` has a corresponding
/// `CommonStackInformation` object. For continuations, this is stored in
/// the `common_stack_information` field of the corresponding `VMContRef`.
/// For the initial stack, the `InitialStack` variant contains a pointer to
/// a `CommonStackInformation`. The latter will be allocated in the stack
/// frame of `invoke_wasm_and_catch_traps`.
///
/// The following invariants hold for these `VMStackLimits` objects,
/// and the data in `VMStoreContext`.
///
/// Currently executing stack: For the currently executing stack (i.e., the
/// stack that is at the head of the store's `stack_chain` list), the
/// associated `VMStackLimits` object contains stale/undefined data. Instead,
/// the live data describing the limits for the currently executing stack is
/// always maintained in `VMStoreContext`. Note that, as a general rule
/// independent of any execution of continuations, the `last_wasm_exit*`
/// fields in the `VMStoreContext` contain undefined values while executing
/// Wasm.
///
/// Parents of currently executing stack: For stacks that appear in the tail
/// of the store's `stack_chain` list (i.e., stacks that are not currently
/// executing themselves, but are an ancestor of the currently executing
/// stack), we have the following: All the fields in the stack's
/// `VMStackLimits` are valid, describing the stack's stack limit, and the
/// pointers at which execution on that stack entered and exited Wasm.
///
/// Suspended continuations: For suspended continuations (including their
/// ancestors), we have the following. Note that the initial stack can never
/// be in this state. The `stack_limit` and `last_enter_wasm_sp` fields of
/// the corresponding `VMStackLimits` object contain valid data, while the
/// `last_exit_wasm_*` fields contain arbitrary values. There is only one
/// exception to this: a continuation that has been created with cont.new,
/// but has never been resumed, is considered "suspended". However, its
/// `last_enter_wasm_sp` field contains undefined data. This is justified,
/// because when resuming a continuation for the first time, a
/// native-to-wasm trampoline is called, which sets up the
/// `last_wasm_entry_sp` in the `VMStoreContext` with the correct value,
/// thus restoring the necessary invariant.
#[derive(Debug, Clone, PartialEq)]
#[repr(usize, C)]
pub enum VMStackChain {
    /// For suspended continuations, denotes the end of their chain of
    /// ancestors.
    Absent = wasmtime_environ::STACK_CHAIN_ABSENT_DISCRIMINANT,
    /// Represents the initial stack (i.e., where we entered Wasm from the
    /// host by executing
    /// `crate::runtime::func::invoke_wasm_and_catch_traps`). Therefore, it
    /// does not have a parent. The `CommonStackInformation` that this
    /// variant points to is stored in the stack frame of
    /// `invoke_wasm_and_catch_traps`.
    InitialStack(*mut VMCommonStackInformation) =
        wasmtime_environ::STACK_CHAIN_INITIAL_STACK_DISCRIMINANT,
    /// Represents a continuation's stack.
    Continuation(*mut VMContRef) = wasmtime_environ::STACK_CHAIN_CONTINUATION_DISCRIMINANT,
}
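
// Illustrative sketch (assumed setup, not actual runtime code) of the first use
// described above: a store whose chain is Continuation(child) -> Continuation(parent)
// -> InitialStack(initial), where `initial` lives in the frame of
// `invoke_wasm_and_catch_traps`. A suspended continuation's chain instead ends in
// `Absent` rather than `InitialStack`.
//
//     fn link_chain(
//         child: &mut VMContRef,
//         parent: &mut VMContRef,
//         initial: &mut VMCommonStackInformation,
//     ) {
//         child.parent_chain = VMStackChain::Continuation(parent as *mut _);
//         parent.parent_chain = VMStackChain::InitialStack(initial as *mut _);
//         // The store's `stack_chain` would then be Continuation(child).
//     }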

impl VMStackChain {
    /// Indicates if `self` is an `InitialStack` variant.
    pub fn is_initial_stack(&self) -> bool {
        matches!(self, VMStackChain::InitialStack(_))
    }

    /// Returns an iterator over the continuations in this chain.
    /// We don't implement `IntoIterator` because our iterator is unsafe, so at
    /// least this gives us some way of indicating this, even though the actual
    /// unsafety lies in the `next` function.
    ///
    /// # Safety
    ///
    /// This function is not unsafe per se, but it returns an object
    /// whose usage is unsafe.
    pub unsafe fn into_continuation_iter(self) -> ContinuationIterator {
        ContinuationIterator(self)
    }

    /// Returns an iterator over the stack limits in this chain.
    /// We don't implement `IntoIterator` because our iterator is unsafe, so at
    /// least this gives us some way of indicating this, even though the actual
    /// unsafety lies in the `next` function.
    ///
    /// # Safety
    ///
    /// This function is not unsafe per se, but it returns an object
    /// whose usage is unsafe.
    pub unsafe fn into_stack_limits_iter(self) -> StackLimitsIterator {
        StackLimitsIterator(self)
    }
}
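
// Illustrative sketch (not used by the runtime): walking a chain with the iterators
// defined below, e.g. to count the continuations starting at the head of the chain.
// The caller must uphold the iterators' safety requirement that every `VMContRef`
// reachable through the chain is still live.
//
//     unsafe fn count_continuations(chain: VMStackChain) -> usize {
//         chain.into_continuation_iter().count()
//     }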

/// Iterator for Continuations in a stack chain.
pub struct ContinuationIterator(VMStackChain);

/// Iterator for VMStackLimits in a stack chain.
pub struct StackLimitsIterator(VMStackChain);

impl Iterator for ContinuationIterator {
    type Item = *mut VMContRef;

    fn next(&mut self) -> Option<Self::Item> {
        match self.0 {
            VMStackChain::Absent | VMStackChain::InitialStack(_) => None,
            VMStackChain::Continuation(ptr) => {
                let continuation = unsafe { ptr.as_mut().unwrap() };
                self.0 = continuation.parent_chain.clone();
                Some(ptr)
            }
        }
    }
}

impl Iterator for StackLimitsIterator {
    type Item = *mut VMStackLimits;

    fn next(&mut self) -> Option<Self::Item> {
        match self.0 {
            VMStackChain::Absent => None,
            VMStackChain::InitialStack(csi) => {
                let stack_limits = unsafe { &mut (*csi).limits } as *mut VMStackLimits;
                self.0 = VMStackChain::Absent;
                Some(stack_limits)
            }
            VMStackChain::Continuation(ptr) => {
                let continuation = unsafe { ptr.as_mut().unwrap() };
                let stack_limits =
                    (&mut continuation.common_stack_information.limits) as *mut VMStackLimits;
                self.0 = continuation.parent_chain.clone();
                Some(stack_limits)
            }
        }
    }
}

/// Encodes the life cycle of a `VMContRef`.
#[derive(Debug, Clone, Copy, PartialEq)]
#[repr(u32)]
pub enum VMStackState {
    /// The `VMContRef` has been created, but neither `resume` nor `switch` has ever been
    /// called on it. During this stage, we may add arguments using `cont.bind`.
    Fresh = wasmtime_environ::STACK_STATE_FRESH_DISCRIMINANT,
    /// The continuation is running, meaning that it is the one currently
    /// executing code.
    Running = wasmtime_environ::STACK_STATE_RUNNING_DISCRIMINANT,
    /// The continuation is not running itself because it executed a resume
    /// instruction that has not finished yet. In other words, it became the
    /// parent of another continuation (which may itself be `Running`, a
    /// `Parent`, or `Suspended`).
    Parent = wasmtime_environ::STACK_STATE_PARENT_DISCRIMINANT,
    /// The continuation was suspended by a `suspend` or `switch` instruction.
    Suspended = wasmtime_environ::STACK_STATE_SUSPENDED_DISCRIMINANT,
    /// The function originally passed to `cont.new` has returned normally.
    /// Note that there is no guarantee that a VMContRef will ever
    /// reach this status, as it may stay suspended until being dropped.
    Returned = wasmtime_environ::STACK_STATE_RETURNED_DISCRIMINANT,
}
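
// Illustrative sketch (an assumption distilled from the comments above, not used by
// the runtime): the state transitions that the documentation describes.
//
//     fn may_transition(from: VMStackState, to: VMStackState) -> bool {
//         use VMStackState::*;
//         matches!(
//             (from, to),
//             (Fresh, Running)       // first `resume` (or `switch`) of the continuation
//                 | (Running, Parent)    // this stack resumes a child continuation
//                 | (Parent, Running)    // the child suspends to a handler here, or returns
//                 | (Running, Suspended) // `suspend` or `switch` away from this stack
//                 | (Suspended, Running) // resumed (or switched to) again
//                 | (Running, Returned)  // the function passed to `cont.new` returns
//         )
//     }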

#[cfg(test)]
mod tests {
    use core::mem::{offset_of, size_of};

    use wasmtime_environ::{HostPtr, Module, PtrSize, StaticModuleIndex, VMOffsets};

    use super::*;

    #[test]
    fn null_pointer_optimization() {
        // The Rust spec does not technically guarantee that the null pointer
        // optimization applies to a struct containing a `NonNull`.
        assert_eq!(size_of::<Option<VMContObj>>(), size_of::<VMContObj>());
    }

    #[test]
    fn check_vm_stack_limits_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMStackLimits, stack_limit),
            usize::from(offsets.ptr.vmstack_limits_stack_limit())
        );
        assert_eq!(
            offset_of!(VMStackLimits, last_wasm_entry_fp),
            usize::from(offsets.ptr.vmstack_limits_last_wasm_entry_fp())
        );
    }

    #[test]
    fn check_vm_common_stack_information_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMCommonStackInformation>(),
            usize::from(offsets.ptr.size_of_vmcommon_stack_information())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, limits),
            usize::from(offsets.ptr.vmcommon_stack_information_limits())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, state),
            usize::from(offsets.ptr.vmcommon_stack_information_state())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, handlers),
            usize::from(offsets.ptr.vmcommon_stack_information_handlers())
        );
        assert_eq!(
            offset_of!(VMCommonStackInformation, first_switch_handler_index),
            usize::from(
                offsets
                    .ptr
                    .vmcommon_stack_information_first_switch_handler_index()
            )
        );
    }

    #[test]
    fn check_vm_array_offsets() {
        // Note that the type parameter has no influence on the size and offsets.
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMHostArray<()>>(),
            usize::from(offsets.ptr.size_of_vmhostarray())
        );
        assert_eq!(
            offset_of!(VMHostArray<()>, length),
            usize::from(offsets.ptr.vmhostarray_length())
        );
        assert_eq!(
            offset_of!(VMHostArray<()>, capacity),
            usize::from(offsets.ptr.vmhostarray_capacity())
        );
        assert_eq!(
            offset_of!(VMHostArray<()>, data),
            usize::from(offsets.ptr.vmhostarray_data())
        );
    }

    #[test]
    fn check_vm_contobj_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMContObj, contref),
            usize::from(offsets.ptr.vmcontobj_contref())
        );
        assert_eq!(
            offset_of!(VMContObj, revision),
            usize::from(offsets.ptr.vmcontobj_revision())
        );
        assert_eq!(
            size_of::<VMContObj>(),
            usize::from(offsets.ptr.size_of_vmcontobj())
        )
    }

    #[test]
    fn check_vm_contref_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            offset_of!(VMContRef, common_stack_information),
            usize::from(offsets.ptr.vmcontref_common_stack_information())
        );
        assert_eq!(
            offset_of!(VMContRef, parent_chain),
            usize::from(offsets.ptr.vmcontref_parent_chain())
        );
        assert_eq!(
            offset_of!(VMContRef, last_ancestor),
            usize::from(offsets.ptr.vmcontref_last_ancestor())
        );
        // Some 32-bit platforms need this to be 8-byte aligned, some don't.
        // So we need to make sure it always is, without padding.
        assert_eq!(u8::vmcontref_revision(&4) % 8, 0);
        assert_eq!(u8::vmcontref_revision(&8) % 8, 0);
        assert_eq!(
            offset_of!(VMContRef, revision),
            usize::from(offsets.ptr.vmcontref_revision())
        );
        assert_eq!(
            offset_of!(VMContRef, stack),
            usize::from(offsets.ptr.vmcontref_stack())
        );
        assert_eq!(
            offset_of!(VMContRef, args),
            usize::from(offsets.ptr.vmcontref_args())
        );
        assert_eq!(
            offset_of!(VMContRef, values),
            usize::from(offsets.ptr.vmcontref_values())
        );
    }

    #[test]
    fn check_vm_stack_chain_offsets() {
        let module = Module::new(StaticModuleIndex::from_u32(0));
        let offsets = VMOffsets::new(HostPtr, &module);
        assert_eq!(
            size_of::<VMStackChain>(),
            usize::from(offsets.ptr.size_of_vmstack_chain())
        );
    }
}