wasmtime_internal_cranelift/func_environ/stack_switching/instructions.rs

use cranelift_codegen::ir::BlockArg;
use itertools::{Either, Itertools};

use cranelift_codegen::ir::condcodes::*;
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{self, MemFlags};
use cranelift_codegen::ir::{Block, BlockCall, InstBuilder, JumpTableData};
use cranelift_frontend::FunctionBuilder;
use wasmtime_environ::{PtrSize, TagIndex, TypeIndex, WasmResult, WasmValType, wasm_unsupported};

fn control_context_size(triple: &target_lexicon::Triple) -> WasmResult<u8> {
    match (triple.architecture, triple.operating_system) {
        (target_lexicon::Architecture::X86_64, target_lexicon::OperatingSystem::Linux) => Ok(24),
        _ => Err(wasm_unsupported!(
            "stack switching not supported on {triple}"
        )),
    }
}
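
// The 24-byte control context corresponds to three pointer-sized slots on
// x86_64 Linux; `VMContinuationStack::load_control_context` below relies on
// the same constant when computing `tos - 0x18`.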

use super::control_effect::ControlEffect;
use super::fatpointer;

/// This module contains compile-time counterparts to types defined elsewhere.
pub(crate) mod stack_switching_helpers {
    use core::marker::PhantomData;
    use cranelift_codegen::ir;
    use cranelift_codegen::ir::InstBuilder;
    use cranelift_codegen::ir::condcodes::IntCC;
    use cranelift_codegen::ir::types::*;
    use cranelift_codegen::ir::{StackSlot, StackSlotKind::*};
    use cranelift_frontend::FunctionBuilder;
    use wasmtime_environ::PtrSize;

    /// Provides information about the layout of a type when it is used as an
    /// element in a host array. This is used for `VMHostArrayRef`.
    pub(crate) trait VMHostArrayEntry {
        /// Returns `(align, size)` in bytes.
        fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(p: &P) -> (u8, u32);
    }

    impl VMHostArrayEntry for u128 {
        fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(_p: &P) -> (u8, u32) {
            (16, 16)
        }
    }

    impl<T> VMHostArrayEntry for *mut T {
        fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(p: &P) -> (u8, u32) {
            (p.size(), p.size().into())
        }
    }
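
    // The `(align, size)` pairs above give the per-entry layout that the
    // array helpers below rely on: `size` is the stride between consecutive
    // entries (see `occupy_next_slots`, `load_data_entries`, and
    // `store_data_entries`) and `align`/`size` shape the stack slot created
    // in `allocate_or_reuse_stack_slot`.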

    #[derive(Copy, Clone)]
    pub struct VMContRef {
        pub address: ir::Value,
    }

    #[derive(Copy, Clone)]
    pub struct VMHostArrayRef<T> {
        /// Address of the VMHostArray we are referencing
        address: ir::Value,

        /// The type parameter T is never used in the fields above. We still
        /// want to have it for consistency with
        /// `wasmtime_environ::Vector` and to use it in the associated
        /// functions.
        phantom: PhantomData<T>,
    }

    pub type VMPayloads = VMHostArrayRef<u128>;

    // Actually a vector of *mut VMTagDefinition
    pub type VMHandlerList = VMHostArrayRef<*mut u8>;
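    // Handler list entries are tag addresses: `translate_resume` writes one
    // `*mut VMTagDefinition` per handler clause, and `search_handler`
    // compares them against the tag being suspended to.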

    /// Compile-time representation of wasmtime_environ::VMStackChain,
    /// consisting of two `ir::Value`s.
    pub struct VMStackChain {
        discriminant: ir::Value,
        payload: ir::Value,
    }

    pub struct VMCommonStackInformation {
        pub address: ir::Value,
    }

    /// Compile-time representation of `crate::runtime::vm::stack::VMContinuationStack`.
    pub struct VMContinuationStack {
        /// This is NOT the "top of stack" address of the stack itself. In line
        /// with how the (runtime) `FiberStack` type works, this is a pointer to
        /// the TOS address.
        tos_ptr: ir::Value,
    }

    impl VMContRef {
        pub fn new(address: ir::Value) -> VMContRef {
            VMContRef { address }
        }

        pub fn args<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMPayloads {
            let offset: i64 = env.offsets.ptr.vmcontref_args().into();
            let address = builder.ins().iadd_imm(self.address, offset);
            VMPayloads::new(address)
        }

        pub fn values<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMPayloads {
            let offset: i64 = env.offsets.ptr.vmcontref_values().into();
            let address = builder.ins().iadd_imm(self.address, offset);
            VMPayloads::new(address)
        }

        pub fn common_stack_information<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMCommonStackInformation {
            let offset: i64 = env.offsets.ptr.vmcontref_common_stack_information().into();
            let address = builder.ins().iadd_imm(self.address, offset);
            VMCommonStackInformation { address }
        }

        /// Stores the parent of this continuation, which may either be another
        /// continuation or the initial stack. It is therefore represented as a
        /// `VMStackChain` element.
        pub fn set_parent_stack_chain<'a>(
            &mut self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            new_stack_chain: &VMStackChain,
        ) {
            let offset = env.offsets.ptr.vmcontref_parent_chain().into();
            new_stack_chain.store(env, builder, self.address, offset)
        }

        /// Loads the parent of this continuation, which may either be another
        /// continuation or the initial stack. It is therefore represented as a
        /// `VMStackChain` element.
        pub fn get_parent_stack_chain<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMStackChain {
            let offset = env.offsets.ptr.vmcontref_parent_chain().into();
            VMStackChain::load(env, builder, self.address, offset, env.pointer_type())
        }

        pub fn set_last_ancestor<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            last_ancestor: ir::Value,
        ) {
            let offset: i32 = env.offsets.ptr.vmcontref_last_ancestor().into();
            let mem_flags = ir::MemFlags::trusted();
            builder
                .ins()
                .store(mem_flags, last_ancestor, self.address, offset);
        }

        pub fn get_last_ancestor<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let offset: i32 = env.offsets.ptr.vmcontref_last_ancestor().into();
            let mem_flags = ir::MemFlags::trusted();
            builder
                .ins()
                .load(env.pointer_type(), mem_flags, self.address, offset)
        }

        /// Gets the revision counter of the given continuation
        /// reference.
        pub fn get_revision<'a>(
            &mut self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let mem_flags = ir::MemFlags::trusted();
            let offset: i32 = env.offsets.ptr.vmcontref_revision().into();
            let revision = builder.ins().load(I64, mem_flags, self.address, offset);
            revision
        }

        /// Sets the revision counter on the given continuation
        /// reference to `revision + 1`.
        pub fn incr_revision<'a>(
            &mut self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            revision: ir::Value,
        ) -> ir::Value {
            let mem_flags = ir::MemFlags::trusted();
            let offset: i32 = env.offsets.ptr.vmcontref_revision().into();
            let revision_plus1 = builder.ins().iadd_imm(revision, 1);
            builder
                .ins()
                .store(mem_flags, revision_plus1, self.address, offset);
            revision_plus1
        }

        pub fn get_fiber_stack<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMContinuationStack {
            // The top of stack field is stored at offset 0 of the `FiberStack`.
            let offset: i64 = env.offsets.ptr.vmcontref_stack().into();
            let fiber_stack_top_of_stack_ptr = builder.ins().iadd_imm(self.address, offset);
            VMContinuationStack::new(fiber_stack_top_of_stack_ptr)
        }
    }

    impl<T: VMHostArrayEntry> VMHostArrayRef<T> {
        pub(crate) fn new(address: ir::Value) -> Self {
            Self {
                address,
                phantom: PhantomData::default(),
            }
        }

        fn get(&self, builder: &mut FunctionBuilder, ty: ir::Type, offset: i32) -> ir::Value {
            let mem_flags = ir::MemFlags::trusted();
            builder.ins().load(ty, mem_flags, self.address, offset)
        }

        fn set<U>(&self, builder: &mut FunctionBuilder, offset: i32, value: ir::Value) {
            debug_assert_eq!(
                builder.func.dfg.value_type(value),
                Type::int_with_byte_size(u16::try_from(core::mem::size_of::<U>()).unwrap())
                    .unwrap()
            );
            let mem_flags = ir::MemFlags::trusted();
            builder.ins().store(mem_flags, value, self.address, offset);
        }

        pub fn get_data<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let offset = env.offsets.ptr.vmhostarray_data().into();
            self.get(builder, env.pointer_type(), offset)
        }

        pub fn get_length<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            // Array length is stored as u32.
            let offset = env.offsets.ptr.vmhostarray_length().into();
            self.get(builder, I32, offset)
        }

        fn set_length<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            length: ir::Value,
        ) {
            // Array length is stored as u32.
            let offset = env.offsets.ptr.vmhostarray_length().into();
            self.set::<u32>(builder, offset, length);
        }

        fn set_capacity<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            capacity: ir::Value,
        ) {
            // Array capacity is stored as u32.
            let offset = env.offsets.ptr.vmhostarray_capacity().into();
            self.set::<u32>(builder, offset, capacity);
        }

        fn set_data<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            data: ir::Value,
        ) {
            debug_assert_eq!(builder.func.dfg.value_type(data), env.pointer_type());
            let offset: i32 = env.offsets.ptr.vmhostarray_data().into();
            let mem_flags = ir::MemFlags::trusted();
            builder.ins().store(mem_flags, data, self.address, offset);
        }

        /// Returns a pointer to the next empty slot in the data buffer and
        /// marks the subsequent `arg_count` slots as occupied.
        pub fn occupy_next_slots<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            arg_count: i32,
        ) -> ir::Value {
            let data = self.get_data(env, builder);
            let original_length = self.get_length(env, builder);
            let new_length = builder
                .ins()
                .iadd_imm(original_length, i64::from(arg_count));
            self.set_length(env, builder, new_length);

            let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
            let original_length = builder.ins().uextend(I64, original_length);
            let byte_offset = builder
                .ins()
                .imul_imm(original_length, i64::from(entry_size));
            builder.ins().iadd(data, byte_offset)
        }

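        /// Points this array's data buffer at a stack slot that can hold
        /// `required_capacity` entries. If `existing_slot` is already large
        /// enough it is reused; otherwise a new explicit stack slot is
        /// created. The capacity and data fields are updated either way.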
        pub fn allocate_or_reuse_stack_slot<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            required_capacity: u32,
            existing_slot: Option<StackSlot>,
        ) -> StackSlot {
            let (align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
            let required_size = required_capacity * entry_size;

            match existing_slot {
                Some(slot) if builder.func.sized_stack_slots[slot].size >= required_size => {
                    let slot_data = &builder.func.sized_stack_slots[slot];
                    debug_assert!(align <= slot_data.align_shift);
                    debug_assert_eq!(slot_data.kind, ExplicitSlot);
                    let existing_capacity = slot_data.size / entry_size;

                    let capacity_value = builder.ins().iconst(I32, i64::from(existing_capacity));
                    let existing_data = builder.ins().stack_addr(env.pointer_type(), slot, 0);

                    self.set_capacity(env, builder, capacity_value);
                    self.set_data(env, builder, existing_data);

                    slot
                }
                _ => {
                    let capacity_value = builder.ins().iconst(I32, i64::from(required_capacity));
                    let slot_size = ir::StackSlotData::new(
                        ir::StackSlotKind::ExplicitSlot,
                        required_size,
                        align,
                    );
                    let slot = builder.create_sized_stack_slot(slot_size);
                    let new_data = builder.ins().stack_addr(env.pointer_type(), slot, 0);

                    self.set_capacity(env, builder, capacity_value);
                    self.set_data(env, builder, new_data);

                    slot
                }
            }
        }

        /// Loads n entries from this Vector object, where n is the length of
        /// `load_types`, which also gives the types of the values to load.
        /// Loading starts at index 0 of the Vector object.
        pub fn load_data_entries<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            load_types: &[ir::Type],
        ) -> Vec<ir::Value> {
            let memflags = ir::MemFlags::trusted();

            let data_start_pointer = self.get_data(env, builder);
            let mut values = vec![];
            let mut offset = 0;
            let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
            for valtype in load_types {
                let val = builder
                    .ins()
                    .load(*valtype, memflags, data_start_pointer, offset);
                values.push(val);
                offset += i32::try_from(entry_size).unwrap();
            }
            values
        }

        /// Stores the given `values` in this Vector object, beginning at
        /// index 0. This expects the Vector object to be empty (i.e., current
        /// length is 0), and to be of sufficient capacity to store |`values`|
        /// entries.
        pub fn store_data_entries<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            values: &[ir::Value],
        ) {
            let store_count = builder
                .ins()
                .iconst(I32, i64::try_from(values.len()).unwrap());

            let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);

            debug_assert!(values.iter().all(|val| {
                let ty = builder.func.dfg.value_type(*val);
                let size = ty.bytes();
                size <= entry_size
            }));

            let memflags = ir::MemFlags::trusted();

            let data_start_pointer = self.get_data(env, builder);

            let mut offset = 0;
            for value in values {
                builder
                    .ins()
                    .store(memflags, *value, data_start_pointer, offset);
                offset += i32::try_from(entry_size).unwrap();
            }

            self.set_length(env, builder, store_count);
        }

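        /// Resets the length to zero. If `discard_buffer` is set, the
        /// capacity and data pointer are zeroed as well.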
        pub fn clear<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            discard_buffer: bool,
        ) {
            let zero32 = builder.ins().iconst(I32, 0);
            self.set_length(env, builder, zero32);

            if discard_buffer {
                let zero32 = builder.ins().iconst(I32, 0);
                self.set_capacity(env, builder, zero32);

                let zero_ptr = builder.ins().iconst(env.pointer_type(), 0);
                self.set_data(env, builder, zero_ptr);
            }
        }
    }

    impl VMStackChain {
        /// Creates a `Self` corresponding to `VMStackChain::Continuation(contref)`.
        pub fn from_continuation<'a>(
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            contref: ir::Value,
        ) -> VMStackChain {
            debug_assert_eq!(
                env.offsets.ptr.size_of_vmstack_chain(),
                2 * env.offsets.ptr.size()
            );
            let discriminant = wasmtime_environ::STACK_CHAIN_CONTINUATION_DISCRIMINANT;
            let discriminant = builder
                .ins()
                .iconst(env.pointer_type(), i64::try_from(discriminant).unwrap());
            VMStackChain {
                discriminant,
                payload: contref,
            }
        }

        /// Creates a `Self` corresponding to `VMStackChain::Absent`.
        pub fn absent<'a>(
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMStackChain {
            debug_assert_eq!(
                env.offsets.ptr.size_of_vmstack_chain(),
                2 * env.offsets.ptr.size()
            );
            let discriminant = wasmtime_environ::STACK_CHAIN_ABSENT_DISCRIMINANT;
            let discriminant = builder
                .ins()
                .iconst(env.pointer_type(), i64::try_from(discriminant).unwrap());
            let zero_filler = builder.ins().iconst(env.pointer_type(), 0i64);
            VMStackChain {
                discriminant,
                payload: zero_filler,
            }
        }

        pub fn is_initial_stack<'a>(
            &self,
            _env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            builder.ins().icmp_imm(
                IntCC::Equal,
                self.discriminant,
                i64::try_from(wasmtime_environ::STACK_CHAIN_INITIAL_STACK_DISCRIMINANT).unwrap(),
            )
        }

        /// Return the two raw `ir::Value`s that represent this VMStackChain.
        pub fn to_raw_parts(&self) -> [ir::Value; 2] {
            [self.discriminant, self.payload]
        }

        /// Construct a `Self` from two raw `ir::Value`s.
        pub fn from_raw_parts(raw_data: [ir::Value; 2]) -> VMStackChain {
            VMStackChain {
                discriminant: raw_data[0],
                payload: raw_data[1],
            }
        }

        /// Load a `VMStackChain` object from the given address.
        pub fn load<'a>(
            _env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            pointer: ir::Value,
            initial_offset: i32,
            pointer_type: ir::Type,
        ) -> VMStackChain {
            let memflags = ir::MemFlags::trusted();
            let mut offset = initial_offset;
            let mut data = vec![];
            for _ in 0..2 {
                data.push(builder.ins().load(pointer_type, memflags, pointer, offset));
                offset += i32::try_from(pointer_type.bytes()).unwrap();
            }
            let data = <[ir::Value; 2]>::try_from(data).unwrap();
            Self::from_raw_parts(data)
        }

        /// Store this `VMStackChain` object at the given address.
        pub fn store<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            target_pointer: ir::Value,
            initial_offset: i32,
        ) {
            let memflags = ir::MemFlags::trusted();
            let mut offset = initial_offset;
            let data = self.to_raw_parts();

            for value in data {
                debug_assert_eq!(builder.func.dfg.value_type(value), env.pointer_type());
                builder.ins().store(memflags, value, target_pointer, offset);
                offset += i32::try_from(env.pointer_type().bytes()).unwrap();
            }
        }

        /// Use this only if you've already checked that `self` corresponds to a `VMStackChain::Continuation`.
        pub fn unchecked_get_continuation(&self) -> ir::Value {
            self.payload
        }

        /// Must only be called if `self` represents an `InitialStack` or
        /// `Continuation` variant. Returns a pointer to the associated
        /// `CommonStackInformation` object.
        pub fn get_common_stack_information<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            _builder: &mut FunctionBuilder,
        ) -> VMCommonStackInformation {
            // `self` corresponds to a VMStackChain::InitialStack or
            // VMStackChain::Continuation.
            // In both cases, the payload is a pointer.
            let address = self.payload;

            // `address` now points to the beginning of either
            // 1. a `VMContRef` struct (in the case of a
            //    VMStackChain::Continuation), or
            // 2. a CommonStackInformation struct (in the case of
            //    VMStackChain::InitialStack).
            //
            // Since a `VMContRef` starts with an (inlined) CommonStackInformation
            // object at offset 0, in both cases `address` is now the address of
            // the beginning of a CommonStackInformation object.
            debug_assert_eq!(env.offsets.ptr.vmcontref_common_stack_information(), 0);
            VMCommonStackInformation { address }
        }
    }

    impl VMCommonStackInformation {
        fn get_state_ptr<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let offset: i64 = env.offsets.ptr.vmcommon_stack_information_state().into();

            builder.ins().iadd_imm(self.address, offset)
        }

        fn get_stack_limits_ptr<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let offset: i64 = env.offsets.ptr.vmcommon_stack_information_limits().into();

            builder.ins().iadd_imm(self.address, offset)
        }

        fn load_state<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let mem_flags = ir::MemFlags::trusted();
            let state_ptr = self.get_state_ptr(env, builder);

            builder.ins().load(I32, mem_flags, state_ptr, 0)
        }

        fn set_state_no_payload<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            discriminant: u32,
        ) {
            let discriminant = builder.ins().iconst(I32, i64::from(discriminant));
            let mem_flags = ir::MemFlags::trusted();
            let state_ptr = self.get_state_ptr(env, builder);

            builder.ins().store(mem_flags, discriminant, state_ptr, 0);
        }

        pub fn set_state_running<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_RUNNING_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        pub fn set_state_parent<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_PARENT_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        pub fn set_state_returned<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_RETURNED_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        pub fn set_state_suspended<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_SUSPENDED_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        /// Checks whether the `VMStackState` reflects that the stack has ever been
        /// active (instead of just having been allocated, but never resumed).
        pub fn was_invoked<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let actual_state = self.load_state(env, builder);
            let allocated = wasmtime_environ::STACK_STATE_FRESH_DISCRIMINANT;
            builder
                .ins()
                .icmp_imm(IntCC::NotEqual, actual_state, i64::from(allocated))
        }

        pub fn get_handler_list<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMHandlerList {
            let offset: i64 = env.offsets.ptr.vmcommon_stack_information_handlers().into();
            let address = builder.ins().iadd_imm(self.address, offset);
            VMHandlerList::new(address)
        }

        pub fn get_first_switch_handler_index<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            // Field first_switch_handler_index has type u32
            let memflags = ir::MemFlags::trusted();
            let offset: i32 = env
                .offsets
                .ptr
                .vmcommon_stack_information_first_switch_handler_index()
                .into();
            builder.ins().load(I32, memflags, self.address, offset)
        }

        pub fn set_first_switch_handler_index<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            value: ir::Value,
        ) {
            // Field first_switch_handler_index has type u32
            let memflags = ir::MemFlags::trusted();
            let offset: i32 = env
                .offsets
                .ptr
                .vmcommon_stack_information_first_switch_handler_index()
                .into();
            builder.ins().store(memflags, value, self.address, offset);
        }

        /// Sets the `last_wasm_entry_fp` and `stack_limit` fields in the
        /// `VMRuntimeLimits` using the values from the `VMStackLimits` of this
        /// object.
        pub fn write_limits_to_vmcontext<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            vmruntime_limits_ptr: ir::Value,
        ) {
            let stack_limits_ptr = self.get_stack_limits_ptr(env, builder);

            let memflags = ir::MemFlags::trusted();

            let mut copy_to_vm_runtime_limits = |our_offset, their_offset| {
                let our_value = builder.ins().load(
                    env.pointer_type(),
                    memflags,
                    stack_limits_ptr,
                    i32::from(our_offset),
                );
                builder.ins().store(
                    memflags,
                    our_value,
                    vmruntime_limits_ptr,
                    i32::from(their_offset),
                );
            };

            let pointer_size = u8::try_from(env.pointer_type().bytes()).unwrap();
            let stack_limit_offset = env.offsets.ptr.vmstack_limits_stack_limit();
            let last_wasm_entry_fp_offset = env.offsets.ptr.vmstack_limits_last_wasm_entry_fp();
            copy_to_vm_runtime_limits(
                stack_limit_offset,
                pointer_size.vmstore_context_stack_limit(),
            );
            copy_to_vm_runtime_limits(
                last_wasm_entry_fp_offset,
                pointer_size.vmstore_context_last_wasm_entry_fp(),
            );
        }

        /// Overwrites the `last_wasm_entry_fp` field of this object's
        /// `VMStackLimits` by loading the corresponding field from the
        /// `VMRuntimeLimits`.
        /// If `load_stack_limit` is true, we do the same for the `stack_limit`
        /// field.
        pub fn load_limits_from_vmcontext<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            vmruntime_limits_ptr: ir::Value,
            load_stack_limit: bool,
        ) {
            let stack_limits_ptr = self.get_stack_limits_ptr(env, builder);

            let memflags = ir::MemFlags::trusted();
            let pointer_size = u8::try_from(env.pointer_type().bytes()).unwrap();

            let mut copy = |runtime_limits_offset, stack_limits_offset| {
                let from_vm_runtime_limits = builder.ins().load(
                    env.pointer_type(),
                    memflags,
                    vmruntime_limits_ptr,
                    runtime_limits_offset,
                );
                builder.ins().store(
                    memflags,
                    from_vm_runtime_limits,
                    stack_limits_ptr,
                    stack_limits_offset,
                );
            };

            let last_wasm_entry_fp_offset = env.offsets.ptr.vmstack_limits_last_wasm_entry_fp();
            copy(
                pointer_size.vmstore_context_last_wasm_entry_fp(),
                last_wasm_entry_fp_offset,
            );

            if load_stack_limit {
                let stack_limit_offset = env.offsets.ptr.vmstack_limits_stack_limit();
                copy(
                    pointer_size.vmstore_context_stack_limit(),
                    stack_limit_offset,
                );
            }
        }
    }

    impl VMContinuationStack {
        /// The parameter is NOT the "top of stack" address of the stack itself. In line
        /// with how the (runtime) `FiberStack` type works, this is a pointer to
        /// the TOS address.
        pub fn new(tos_ptr: ir::Value) -> Self {
            Self { tos_ptr }
        }

        fn load_top_of_stack<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let mem_flags = ir::MemFlags::trusted();
            builder
                .ins()
                .load(env.pointer_type(), mem_flags, self.tos_ptr, 0)
        }

        /// Returns the address of the control context stored in the stack
        /// memory, as used by `stack_switch` instructions.
        pub fn load_control_context<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let tos = self.load_top_of_stack(env, builder);
            // Control context begins 24 bytes below top of stack (see unix.rs)
            builder.ins().iadd_imm(tos, -0x18)
        }
    }
}

use helpers::VMStackChain;
use stack_switching_helpers as helpers;

/// Stores the given arguments in the appropriate `VMPayloads` object of the
/// continuation: if the continuation was never invoked, the `args` object is
/// used; otherwise, the `values` object is used.
pub(crate) fn vmcontref_store_payloads<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    values: &[ir::Value],
    contref: ir::Value,
) {
    let count =
        i32::try_from(values.len()).expect("Number of stack switching payloads should fit in i32");
    if values.len() > 0 {
        let use_args_block = builder.create_block();
        let use_payloads_block = builder.create_block();
        let store_data_block = builder.create_block();
        builder.append_block_param(store_data_block, env.pointer_type());
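        // `store_data_block` receives, as a block parameter, the destination
        // pointer into either the `args` or `values` buffer.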

        let co = helpers::VMContRef::new(contref);
        let csi = co.common_stack_information(env, builder);
        let was_invoked = csi.was_invoked(env, builder);
        builder
            .ins()
            .brif(was_invoked, use_payloads_block, &[], use_args_block, &[]);

        {
            builder.switch_to_block(use_args_block);
            builder.seal_block(use_args_block);

            let args = co.args(env, builder);
            let ptr = args.occupy_next_slots(env, builder, count);

            builder
                .ins()
                .jump(store_data_block, &[BlockArg::Value(ptr)]);
        }

        {
            builder.switch_to_block(use_payloads_block);
            builder.seal_block(use_payloads_block);

            let payloads = co.values(env, builder);

            // This also checks that the buffer is large enough to hold
            // `values.len()` more elements.
            let ptr = payloads.occupy_next_slots(env, builder, count);
            builder
                .ins()
                .jump(store_data_block, &[BlockArg::Value(ptr)]);
        }

        {
            builder.switch_to_block(store_data_block);
            builder.seal_block(store_data_block);

            let ptr = builder.block_params(store_data_block)[0];

            // Store the values.
            let memflags = ir::MemFlags::trusted();
            let mut offset = 0;
            for value in values {
                builder.ins().store(memflags, *value, ptr, offset);
                offset += i32::from(env.offsets.ptr.maximum_value_size());
            }
        }
    }
}

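/// Returns a pointer to the `VMTagDefinition` for the given tag index:
/// definitions of locally defined tags live inline in the `VMContext`, while
/// imported tags are resolved through a pointer loaded from the `VMContext`.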
pub(crate) fn tag_address<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    index: u32,
) -> ir::Value {
    let vmctx = env.vmctx_val(&mut builder.cursor());
    let tag_index = wasmtime_environ::TagIndex::from_u32(index);
    let pointer_type = env.pointer_type();
    if let Some(def_index) = env.module.defined_tag_index(tag_index) {
        let offset = i32::try_from(env.offsets.vmctx_vmtag_definition(def_index)).unwrap();
        builder.ins().iadd_imm(vmctx, i64::from(offset))
    } else {
        let offset = i32::try_from(env.offsets.vmctx_vmtag_import_from(tag_index)).unwrap();
        builder.ins().load(
            pointer_type,
            ir::MemFlags::trusted().with_readonly(),
            vmctx,
            ir::immediates::Offset32::new(offset),
        )
    }
}

/// Returns the stack chain saved in the given `VMContext`. Note that the
/// head of the list is the actively running stack (initial stack or
/// continuation).
pub fn vmctx_load_stack_chain<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
) -> VMStackChain {
    let stack_chain_offset = env.offsets.ptr.vmstore_context_stack_chain().into();

    // First we need to get the `VMStoreContext`.
    let vm_store_context_offset = env.offsets.ptr.vmctx_store_context();
    let vm_store_context = builder.ins().load(
        env.pointer_type(),
        MemFlags::trusted(),
        vmctx,
        vm_store_context_offset,
    );

    VMStackChain::load(
        env,
        builder,
        vm_store_context,
        stack_chain_offset,
        env.pointer_type(),
    )
}

/// Stores the given stack chain in the `VMContext`, overwriting the
/// existing one.
pub fn vmctx_store_stack_chain<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
    stack_chain: &VMStackChain,
) {
    let stack_chain_offset = env.offsets.ptr.vmstore_context_stack_chain().into();

    // First we need to get the `VMStoreContext`.
    let vm_store_context_offset = env.offsets.ptr.vmctx_store_context();
    let vm_store_context = builder.ins().load(
        env.pointer_type(),
        MemFlags::trusted(),
        vmctx,
        vm_store_context_offset,
    );

    stack_chain.store(env, builder, vm_store_context, stack_chain_offset)
}

/// Similar to `vmctx_store_stack_chain`, but instead of storing an arbitrary
/// `VMStackChain`, stores `VMStackChain::Continuation(contref)`.
pub fn vmctx_set_active_continuation<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
    contref: ir::Value,
) {
    let chain = VMStackChain::from_continuation(env, builder, contref);
    vmctx_store_stack_chain(env, builder, vmctx, &chain)
}

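/// Loads the pointer to the `VMStoreContext` from the `VMContext`. This is
/// the `vmruntime_limits_ptr` value consumed by `write_limits_to_vmcontext`
/// and `load_limits_from_vmcontext` above.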
pub fn vmctx_load_vm_runtime_limits_ptr<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    vmctx: ir::Value,
) -> ir::Value {
    let pointer_type = env.pointer_type();
    let offset = i32::from(env.offsets.ptr.vmctx_store_context());

    // The *pointer* to the VMRuntimeLimits does not change within the
    // same function, allowing us to set the `read_only` flag.
    let flags = ir::MemFlags::trusted().with_readonly();

    builder.ins().load(pointer_type, flags, vmctx, offset)
}

/// This function generates code that searches for a handler for `tag_address`,
/// which must be a `*mut VMTagDefinition`. The search walks up the chain of
/// continuations beginning at `start`.
///
/// The flag `search_suspend_handlers` determines whether we search for a
/// suspend or switch handler. Concretely, this influences which part of each
/// handler list we will search.
///
/// We trap if no handler was found.
///
/// The returned values are:
/// 1. The stack (continuation or initial stack, represented as a VMStackChain) in
///    whose handler list we found the tag (i.e., the stack that performed the
///    resume instruction that installed a handler for the tag).
/// 2. The continuation whose parent is the stack mentioned in 1.
/// 3. The index of the handler in the handler list.
///
/// In pseudo-code, the generated code's behavior can be expressed as
/// follows:
///
/// chain_link = start
/// while !chain_link.is_initial_stack() {
///   contref = chain_link.get_contref()
///   parent_link = contref.parent
///   parent_csi = parent_link.get_common_stack_information();
///   handlers = parent_csi.handlers;
///   (begin_range, end_range) = if search_suspend_handlers {
///     (0, parent_csi.first_switch_handler_index)
///   } else {
///     (parent_csi.first_switch_handler_index, handlers.length)
///   };
///   for index in begin_range..end_range {
///     if handlers[index] == tag_address {
///       goto on_match(contref, index)
///     }
///   }
///   chain_link = parent_link
/// }
/// trap(unhandled_tag)
///
///
1049/// ... execution continues here here ...
1050///
1051fn search_handler<'a>(
1052    env: &mut crate::func_environ::FuncEnvironment<'a>,
1053    builder: &mut FunctionBuilder,
1054    start: &helpers::VMStackChain,
1055    tag_address: ir::Value,
1056    search_suspend_handlers: bool,
1057) -> (VMStackChain, ir::Value, ir::Value) {
1058    let handle_link = builder.create_block();
1059    let begin_search_handler_list = builder.create_block();
1060    let try_index = builder.create_block();
1061    let compare_tags = builder.create_block();
1062    let on_match = builder.create_block();
1063    let on_no_match = builder.create_block();
1064    let block_args = start.to_raw_parts().map(|v| BlockArg::Value(v));
1065
1066    // Terminate previous block:
1067    builder.ins().jump(handle_link, &block_args);
1068
1069    // Block handle_link
1070    let chain_link = {
1071        builder.append_block_param(handle_link, env.pointer_type());
1072        builder.append_block_param(handle_link, env.pointer_type());
1073        builder.switch_to_block(handle_link);
1074
1075        let raw_parts = builder.block_params(handle_link);
1076        let chain_link = helpers::VMStackChain::from_raw_parts([raw_parts[0], raw_parts[1]]);
1077        let is_initial_stack = chain_link.is_initial_stack(env, builder);
1078        builder.ins().brif(
1079            is_initial_stack,
1080            on_no_match,
1081            &[],
1082            begin_search_handler_list,
1083            &[],
1084        );
1085        chain_link
1086    };
1087
1088    // Block begin_search_handler_list
1089    let (contref, parent_link, handler_list_data_ptr, end_range) = {
1090        builder.switch_to_block(begin_search_handler_list);
1091        let contref = chain_link.unchecked_get_continuation();
1092        let contref = helpers::VMContRef::new(contref);
1093
1094        let parent_link = contref.get_parent_stack_chain(env, builder);
1095        let parent_csi = parent_link.get_common_stack_information(env, builder);
1096
1097        let handlers = parent_csi.get_handler_list(env, builder);
1098        let handler_list_data_ptr = handlers.get_data(env, builder);
1099
1100        let first_switch_handler_index = parent_csi.get_first_switch_handler_index(env, builder);
1101
1102        // Note that these indices are inclusive-exclusive, i.e. [begin_range, end_range).
1103        let (begin_range, end_range) = if search_suspend_handlers {
1104            let zero = builder.ins().iconst(I32, 0);
1105            (zero, first_switch_handler_index)
1106        } else {
1107            let length = handlers.get_length(env, builder);
1108            (first_switch_handler_index, length)
1109        };
1110
1111        builder
1112            .ins()
1113            .jump(try_index, &[BlockArg::Value(begin_range)]);
1114
1115        (contref, parent_link, handler_list_data_ptr, end_range)
1116    };
1117
1118    // Block try_index
1119    let index = {
1120        builder.append_block_param(try_index, I32);
1121        builder.switch_to_block(try_index);
1122        let index = builder.block_params(try_index)[0];
1123
1124        let in_bounds = builder
1125            .ins()
1126            .icmp(IntCC::UnsignedLessThan, index, end_range);
1127        let block_args = parent_link.to_raw_parts().map(|v| BlockArg::Value(v));
1128        builder
1129            .ins()
1130            .brif(in_bounds, compare_tags, &[], handle_link, &block_args);
1131        index
1132    };
1133
1134    // Block compare_tags
1135    {
1136        builder.switch_to_block(compare_tags);
1137
1138        let base = handler_list_data_ptr;
1139        let entry_size = env.pointer_type().bytes();
1140        let offset = builder.ins().imul_imm(index, i64::from(entry_size));
1141        let offset = builder.ins().uextend(I64, offset);
1142        let entry_address = builder.ins().iadd(base, offset);
1143
1144        let memflags = ir::MemFlags::trusted();
1145
1146        let handled_tag = builder
1147            .ins()
1148            .load(env.pointer_type(), memflags, entry_address, 0);
1149
1150        let tags_match = builder.ins().icmp(IntCC::Equal, handled_tag, tag_address);
1151        let incremented_index = builder.ins().iadd_imm(index, 1);
1152        builder.ins().brif(
1153            tags_match,
1154            on_match,
1155            &[],
1156            try_index,
1157            &[BlockArg::Value(incremented_index)],
1158        );
1159    }
1160
1161    // Block on_no_match
1162    {
1163        builder.switch_to_block(on_no_match);
1164        builder.set_cold_block(on_no_match);
1165        builder.ins().trap(crate::TRAP_UNHANDLED_TAG);
1166    }
1167
1168    builder.seal_block(handle_link);
1169    builder.seal_block(begin_search_handler_list);
1170    builder.seal_block(try_index);
1171    builder.seal_block(compare_tags);
1172    builder.seal_block(on_match);
1173    builder.seal_block(on_no_match);
1174
1175    // final block: on_match
1176    builder.switch_to_block(on_match);
1177
1178    (parent_link, contref.address, index)
1179}
1180
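/// Translates `cont.bind`: checks the revision witness of `contobj`, stores
/// the partially applied `args` into the continuation, increments the
/// revision, and returns a fresh fat pointer for the same `VMContRef`.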
pub(crate) fn translate_cont_bind<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    contobj: ir::Value,
    args: &[ir::Value],
) -> ir::Value {
    let (witness, contref) = fatpointer::deconstruct(env, &mut builder.cursor(), contobj);

    // The typing rules for cont.bind allow a null reference to be passed to it.
    builder.ins().trapz(contref, crate::TRAP_NULL_REFERENCE);

    let mut vmcontref = helpers::VMContRef::new(contref);
    let revision = vmcontref.get_revision(env, builder);
    let evidence = builder.ins().icmp(IntCC::Equal, witness, revision);
    builder
        .ins()
        .trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);

    vmcontref_store_payloads(env, builder, args, contref);

    let revision = vmcontref.incr_revision(env, builder, revision);
    let contobj = fatpointer::construct(env, &mut builder.cursor(), revision, contref);
    contobj
}

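/// Translates `cont.new`: calls the `cont_new` builtin to allocate a
/// `VMContRef` for `func` and combines it with its current revision into a
/// fat pointer.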
pub(crate) fn translate_cont_new<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    func: ir::Value,
    arg_types: &[WasmValType],
    return_types: &[WasmValType],
) -> WasmResult<ir::Value> {
    // The typing rules for cont.new allow a null reference to be passed to it.
    builder.ins().trapz(func, crate::TRAP_NULL_REFERENCE);

    let nargs = builder
        .ins()
        .iconst(I32, i64::try_from(arg_types.len()).unwrap());
    let nreturns = builder
        .ins()
        .iconst(I32, i64::try_from(return_types.len()).unwrap());

    let cont_new_func = super::builtins::cont_new(env, &mut builder.func)?;
    let vmctx = env.vmctx_val(&mut builder.cursor());
    let call_inst = builder
        .ins()
        .call(cont_new_func, &[vmctx, func, nargs, nreturns]);
    let contref = *builder.func.dfg.inst_results(call_inst).first().unwrap();

    let tag = helpers::VMContRef::new(contref).get_revision(env, builder);
    let contobj = fatpointer::construct(env, &mut builder.cursor(), tag, contref);
    Ok(contobj)
}

pub(crate) fn translate_resume<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    type_index: u32,
    resume_contobj: ir::Value,
    resume_args: &[ir::Value],
    resumetable: &[(u32, Option<ir::Block>)],
) -> WasmResult<Vec<ir::Value>> {
    // The resume instruction is the most involved instruction to
    // compile as it is responsible for both continuation application
    // and control tag dispatch.
    //
    // Here we translate a resume instruction into several basic
    // blocks as follows:
    //
    //        previous block
    //              |
    //              |
    //        resume_block
    //         /           \
    //        /             \
    //        |             |
    //  return_block        |
    //                suspend block
    //                      |
    //                dispatch block
    //
    // * resume_block handles continuation arguments and performs
    //   actual stack switch. On ordinary return from resume, it jumps
    //   to the `return_block`, whereas on suspension it jumps to the
    //   `suspend_block`.
    // * suspend_block is used on suspension, jumps onward to
    //   `dispatch_block`.
    // * dispatch_block uses a jump table to dispatch to actual
    //   user-defined handler blocks, based on the handler index
    //   provided on suspension. Note that we do not jump to the
    //   handler blocks directly. Instead, each handler block has a
    //   corresponding preamble block, which we jump to in order to
1273    //   reach a particular handler block. The preamble block prepares
1274    //   the arguments and continuation object to be passed to the
1275    //   actual handler block.
1276    //
1277    let resume_block = builder.create_block();
1278    let return_block = builder.create_block();
1279    let suspend_block = builder.create_block();
1280    let dispatch_block = builder.create_block();
1281
1282    let vmctx = env.vmctx_val(&mut builder.cursor());
1283
1284    // Split the resumetable into suspend handlers (each represented by the tag
1285    // index and handler block) and the switch handlers (represented just by the
1286    // tag index). Note that we currently don't remove duplicate tags.
1287    let (suspend_handlers, switch_tags): (Vec<(u32, Block)>, Vec<u32>) = resumetable
1288        .iter()
1289        .partition_map(|(tag_index, block_opt)| match block_opt {
1290            Some(block) => Either::Left((*tag_index, *block)),
1291            None => Either::Right(*tag_index),
1292        });
1293
1294    // Technically, there is no need to have a dedicated resume block, we could
1295    // just put all of its contents into the current block.
1296    builder.ins().jump(resume_block, &[]);
1297
1298    // Resume block: actually resume the continuation chain ending at `resume_contref`.
1299    let (resume_result, vm_runtime_limits_ptr, original_stack_chain, new_stack_chain) = {
1300        builder.switch_to_block(resume_block);
1301        builder.seal_block(resume_block);
1302
1303        let (witness, resume_contref) =
1304            fatpointer::deconstruct(env, &mut builder.cursor(), resume_contobj);
1305
1306        // The typing rules for resume allow a null reference to be passed to it.
1307        builder
1308            .ins()
1309            .trapz(resume_contref, crate::TRAP_NULL_REFERENCE);
1310
1311        let mut vmcontref = helpers::VMContRef::new(resume_contref);
1312
1313        let revision = vmcontref.get_revision(env, builder);
1314        let evidence = builder.ins().icmp(IntCC::Equal, revision, witness);
1315        builder
1316            .ins()
1317            .trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
1318        let _next_revision = vmcontref.incr_revision(env, builder, revision);
1319
1320        if resume_args.len() > 0 {
1321            // We store the arguments in the `VMContRef` to be resumed.
1322            vmcontref_store_payloads(env, builder, resume_args, resume_contref);
1323        }
1324
1325        // Splice together stack chains:
1326        // Connect the end of the chain starting at `resume_contref` to the currently active chain.
1327        let mut last_ancestor = helpers::VMContRef::new(vmcontref.get_last_ancestor(env, builder));
1328
1329        // Make the currently running continuation (if any) the parent of the one we are about to resume.
1330        let original_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
1331        last_ancestor.set_parent_stack_chain(env, builder, &original_stack_chain);
1332
1333        // Just for consistency: `vmcontref` is about to get state Running, so let's zero out its last_ancestor field.
1334        let zero = builder.ins().iconst(env.pointer_type(), 0);
1335        vmcontref.set_last_ancestor(env, builder, zero);
1336
1337        // We mark `resume_contref` as the currently running one
1338        vmctx_set_active_continuation(env, builder, vmctx, resume_contref);
1339
1340        // Note that the resume_contref libcall a few lines further below
1341        // manipulates the stack limits as follows:
1342        // 1. Copy stack_limit, last_wasm_entry_sp and last_wasm_exit* values from
1343        // VMRuntimeLimits into the currently active continuation (i.e., the
1344        // one that will become the parent of the to-be-resumed one)
1345        //
1346        // 2. Copy `stack_limit` and `last_wasm_entry_sp` in the
1347        // `VMStackLimits` of `resume_contref` into the `VMRuntimeLimits`.
1348        //
1349        // See the comment on `wasmtime_environ::VMStackChain` for a
1350        // description of the invariants that we maintain for the various stack
1351        // limits.
1352
1353        // `resume_contref` is now active, and its parent is suspended.
1354        let resume_contref = helpers::VMContRef::new(resume_contref);
1355        let resume_csi = resume_contref.common_stack_information(env, builder);
1356        let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
1357        resume_csi.set_state_running(env, builder);
1358        parent_csi.set_state_parent(env, builder);
1359
1360        // We update the `VMStackLimits` of the parent of the continuation to be resumed
1361        // as well as the `VMRuntimeLimits`.
1362        // See the comment on `wasmtime_environ::VMStackChain` for a description
1363        // of the invariants that we maintain for the various stack limits.
1364        let vm_runtime_limits_ptr = vmctx_load_vm_runtime_limits_ptr(env, builder, vmctx);
1365        parent_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, true);
1366        resume_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
1367
1368        // Install handlers in (soon to be) parent's VMHandlerList:
1369        // Let the i-th handler clause be (on $tag $block).
1370        // Then the i-th entry of the VMHandlerList will be the address of $tag.
1371        let handler_list = parent_csi.get_handler_list(env, builder);
1372
        if !resumetable.is_empty() {
            // Total number of handlers (suspend and switch).
            let handler_count = u32::try_from(resumetable.len()).unwrap();
            // Populate the handler list's data pointer with a pointer to a
            // sufficiently large area on this stack.
1378            env.stack_switching_handler_list_buffer =
1379                Some(handler_list.allocate_or_reuse_stack_slot(
1380                    env,
1381                    builder,
1382                    handler_count,
1383                    env.stack_switching_handler_list_buffer,
1384                ));
1385
1386            let suspend_handler_count = suspend_handlers.len();
1387
1388            // All handlers, represented by the indices of the tags they handle.
1389            // All the suspend handlers come first, followed by all the switch handlers.
1390            let all_handlers = suspend_handlers
1391                .iter()
1392                .map(|(tag_index, _block)| *tag_index)
1393                .chain(switch_tags);
1394
1395            // Translate all tag indices to tag addresses (i.e., the corresponding *mut VMTagDefinition).
1396            let all_tag_addresses: Vec<ir::Value> = all_handlers
1397                .map(|tag_index| tag_address(env, builder, tag_index))
1398                .collect();
1399
1400            // Store all tag addresses in the handler list.
1401            handler_list.store_data_entries(env, builder, &all_tag_addresses);
1402
            // To distinguish switch handlers from suspend handlers when
            // searching the handler list, store the index at which the switch
            // handlers start.
1405            let first_switch_handler_index = builder
1406                .ins()
1407                .iconst(I32, i64::try_from(suspend_handler_count).unwrap());
1408            parent_csi.set_first_switch_handler_index(env, builder, first_switch_handler_index);
1409        }
1410
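        // The payload we pass to `stack_switch` below is handed to whatever
        // code continues execution on the target stack; here it encodes that
        // this transfer of control is an ordinary resume.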
1411        let resume_payload = ControlEffect::encode_resume(builder).to_u64();
1412
1413        // Note that the control context we use for switching is not the one in
1414        // (the stack of) resume_contref, but in (the stack of) last_ancestor!
1415        let fiber_stack = last_ancestor.get_fiber_stack(env, builder);
1416        let control_context_ptr = fiber_stack.load_control_context(env, builder);
1417
1418        let result =
1419            builder
1420                .ins()
1421                .stack_switch(control_context_ptr, control_context_ptr, resume_payload);
1422
1423        // At this point we know nothing about the continuation that just
1424        // suspended or returned. In particular, it does not have to be what we
1425        // called `resume_contref` earlier on. We must reload the information
1426        // about the now active continuation from the VMContext.
1427        let new_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
1428
1429        // Now the parent contref (or initial stack) is active again
1430        vmctx_store_stack_chain(env, builder, vmctx, &original_stack_chain);
1431        parent_csi.set_state_running(env, builder);
1432
1433        // Just for consistency: Clear the handler list.
1434        handler_list.clear(env, builder, true);
1435        parent_csi.set_first_switch_handler_index(env, builder, zero);
1436
1437        // Extract the result and signal bit.
1438        let result = ControlEffect::from_u64(result);
1439        let signal = result.signal(builder);
1440
1441        // Jump to the return block if the result signal is 0, otherwise jump to
1442        // the suspend block.
1443        builder
1444            .ins()
1445            .brif(signal, suspend_block, &[], return_block, &[]);
1446
1447        (
1448            result,
1449            vm_runtime_limits_ptr,
1450            original_stack_chain,
1451            new_stack_chain,
1452        )
1453    };
1454
1455    // The suspend block: Only used when we suspended, not for returns.
1456    // Here we extract the index of the handler to use.
1457    let (handler_index, suspended_contref, suspended_contobj) = {
1458        builder.switch_to_block(suspend_block);
1459        builder.seal_block(suspend_block);
1460
1461        let suspended_continuation = new_stack_chain.unchecked_get_continuation();
1462        let mut suspended_continuation = helpers::VMContRef::new(suspended_continuation);
1463        let suspended_csi = suspended_continuation.common_stack_information(env, builder);
1464
1465        // Note that at the suspend site, we already
1466        // 1. Set the state of suspended_continuation to Suspended
1467        // 2. Set suspended_continuation.last_ancestor
1468        // 3. Broke the continuation chain at suspended_continuation.last_ancestor
1469
1470        // We store parts of the VMRuntimeLimits into the continuation that just suspended.
1471        suspended_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, false);
1472
1473        // Afterwards (!), restore parts of the VMRuntimeLimits from the
1474        // parent of the suspended continuation (which is now active).
1475        let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
1476        parent_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
1477
1478        // Extract the handler index
1479        let handler_index = resume_result.handler_index(builder);
1480
1481        let revision = suspended_continuation.get_revision(env, builder);
1482        let suspended_contobj = fatpointer::construct(
1483            env,
1484            &mut builder.cursor(),
1485            revision,
1486            suspended_continuation.address,
1487        );
1488
1489        // We need to terminate this block before being allowed to switch to
1490        // another one.
1491        builder.ins().jump(dispatch_block, &[]);
1492
1493        (handler_index, suspended_continuation, suspended_contobj)
1494    };
1495
1496    // For technical reasons, the jump table needs to have a default
1497    // block. In our case, it should be unreachable, since the handler
    // index we dispatch on should correspond to an actual handler
1499    // block in the jump table.
1500    let jt_default_block = builder.create_block();
1501    {
1502        builder.switch_to_block(jt_default_block);
1503        builder.set_cold_block(jt_default_block);
1504
1505        builder.ins().trap(crate::TRAP_UNREACHABLE);
1506    }
1507
1508    // We create a preamble block for each of the actual handler blocks: It
1509    // reads the necessary arguments and passes them to the actual handler
1510    // block, together with the continuation object.
1511    let target_preamble_blocks = {
1512        let mut preamble_blocks = vec![];
1513
1514        for &(handle_tag, target_block) in &suspend_handlers {
1515            let preamble_block = builder.create_block();
1516            preamble_blocks.push(preamble_block);
1517            builder.switch_to_block(preamble_block);
1518
1519            let param_types = env.tag_params(TagIndex::from_u32(handle_tag));
1520            let param_types: Vec<ir::Type> = param_types
1521                .iter()
1522                .map(|wty| crate::value_type(env.isa(), *wty))
1523                .collect();
1524
1525            let values = suspended_contref.values(env, builder);
1526            let mut suspend_args: Vec<BlockArg> = values
1527                .load_data_entries(env, builder, &param_types)
1528                .into_iter()
                .map(BlockArg::Value)
1530                .collect();
1531
            // At the suspend site, we store the suspend args in the
            // `values` buffer of the VMContRef that was active at the time
            // the suspend instruction was executed.
1535            suspend_args.push(BlockArg::Value(suspended_contobj));
1536
            // We clear the suspend args. This is mostly for consistency. Note
            // that we don't zero out the data buffer: we still need it for the
            // tag return values that are stored there once the continuation is
            // resumed again.
            values.clear(env, builder, false);
1541
1542            builder.ins().jump(target_block, &suspend_args);
1543        }
1544
1545        preamble_blocks
1546    };
1547
    // Dispatch block. All it does is jump to the right preamble block based on
1549    // the handler index.
1550    {
1551        builder.switch_to_block(dispatch_block);
1552        builder.seal_block(dispatch_block);
1553
1554        let default_bc = builder.func.dfg.block_call(jt_default_block, &[]);
1555
1556        let adapter_bcs: Vec<BlockCall> = target_preamble_blocks
1557            .iter()
1558            .map(|b| builder.func.dfg.block_call(*b, &[]))
1559            .collect();
1560
1561        let jt_data = JumpTableData::new(default_bc, &adapter_bcs);
1562        let jt = builder.create_jump_table(jt_data);
1563
1564        builder.ins().br_table(handler_index, jt);
1565
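        // The preamble blocks and the default block have no predecessors other
        // than the `br_table` above, so they can be sealed now.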
1566        for preamble_block in target_preamble_blocks {
1567            builder.seal_block(preamble_block);
1568        }
1569        builder.seal_block(jt_default_block);
1570    }
1571
1572    // Return block: Jumped to by resume block if continuation
1573    // returned normally.
1574    {
1575        builder.switch_to_block(return_block);
1576        builder.seal_block(return_block);
1577
1578        // If we got a return signal, a continuation must have been running.
1579        let returned_contref = new_stack_chain.unchecked_get_continuation();
1580        let returned_contref = helpers::VMContRef::new(returned_contref);
1581
1582        // Restore parts of the VMRuntimeLimits from the parent of the
1583        // returned continuation (which is now active).
1584        let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
1585        parent_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
1586
1587        let returned_csi = returned_contref.common_stack_information(env, builder);
1588        returned_csi.set_state_returned(env, builder);
1589
1590        // Load the values returned by the continuation.
1591        let return_types: Vec<_> = env
1592            .continuation_returns(TypeIndex::from_u32(type_index))
1593            .iter()
1594            .map(|ty| crate::value_type(env.isa(), *ty))
1595            .collect();
1596        let payloads = returned_contref.args(env, builder);
1597        let return_values = payloads.load_data_entries(env, builder, &return_types);
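        // Consume the return values and clear the buffer; it is no longer
        // needed.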
1598        payloads.clear(env, builder, true);
1599
1600        Ok(return_values)
1601    }
1602}
1603
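/// Translates a `suspend` instruction: searches the active continuation chain
/// for a handler for `tag_index`, stores `suspend_args` for that handler, and
/// switches to the stack on which the handler was installed. Returns the tag's
/// return values once this continuation is resumed again.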
1604pub(crate) fn translate_suspend<'a>(
1605    env: &mut crate::func_environ::FuncEnvironment<'a>,
1606    builder: &mut FunctionBuilder,
1607    tag_index: u32,
1608    suspend_args: &[ir::Value],
1609    tag_return_types: &[ir::Type],
1610) -> Vec<ir::Value> {
1611    let tag_addr = tag_address(env, builder, tag_index);
1612
1613    let vmctx = env.vmctx_val(&mut builder.cursor());
1614    let active_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
1615
1616    let (_, end_of_chain_contref, handler_index) =
1617        search_handler(env, builder, &active_stack_chain, tag_addr, true);
1618
1619    // If we get here, the search_handler logic succeeded (i.e., did not trap).
1620    // Thus, there is at least one parent, so we are not on the initial stack.
    // We can therefore extract the continuation directly.
1622    let active_contref = active_stack_chain.unchecked_get_continuation();
1623    let active_contref = helpers::VMContRef::new(active_contref);
1624    let mut end_of_chain_contref = helpers::VMContRef::new(end_of_chain_contref);
1625
1626    active_contref.set_last_ancestor(env, builder, end_of_chain_contref.address);
1627
1628    // In the active_contref's `values` buffer, stack-allocate enough room so that we can
1629    // later store the following:
1630    // 1. The suspend arguments
1631    // 2. Afterwards, the tag return values
1632    let values = active_contref.values(env, builder);
1633    let required_capacity =
1634        u32::try_from(std::cmp::max(suspend_args.len(), tag_return_types.len()))
1635            .expect("Number of stack switching payloads should fit in u32");
1636
1637    if required_capacity > 0 {
1638        env.stack_switching_values_buffer = Some(values.allocate_or_reuse_stack_slot(
1639            env,
1640            builder,
1641            required_capacity,
1642            env.stack_switching_values_buffer,
1643        ));
1644    }
1645
    if !suspend_args.is_empty() {
1647        values.store_data_entries(env, builder, suspend_args);
1648    }
1649
1650    // Set current continuation to suspended and break up handler chain.
1651    let active_contref_csi = active_contref.common_stack_information(env, builder);
1652    active_contref_csi.set_state_suspended(env, builder);
1653    let absent_chain_link = VMStackChain::absent(env, builder);
1654    end_of_chain_contref.set_parent_stack_chain(env, builder, &absent_chain_link);
1655
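    // The payload encodes both the "suspend" signal and the index of the
    // matching handler, so that the resume site on the parent stack can
    // dispatch to the corresponding handler block.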
1656    let suspend_payload = ControlEffect::encode_suspend(builder, handler_index).to_u64();
1657
1658    // Note that the control context we use for switching is the one
1659    // at the end of the chain, not the one in active_contref!
1660    // This also means that stack_switch saves the information about
1661    // the current stack in the control context located in the stack
1662    // of end_of_chain_contref.
1663    let fiber_stack = end_of_chain_contref.get_fiber_stack(env, builder);
1664    let control_context_ptr = fiber_stack.load_control_context(env, builder);
1665
1666    builder
1667        .ins()
1668        .stack_switch(control_context_ptr, control_context_ptr, suspend_payload);
1669
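    // Execution continues here once another stack transfers control back to
    // this continuation (e.g., by resuming it).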
    // The return values of the suspend instruction are the tag return values,
    // saved in the `values` buffer.
1671    let values = active_contref.values(env, builder);
1672    let return_values = values.load_data_entries(env, builder, tag_return_types);
1673    // We effectively consume the values and discard the stack allocated buffer.
1674    values.clear(env, builder, true);
1675
1676    return_values
1677}
1678
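/// Translates a `switch` instruction: suspends the chain of continuations up
/// to (but not including) the stack holding a matching `switch` handler for
/// `tag_index`, packages that chain up as a new continuation object, and
/// transfers control directly to `switchee_contobj`, passing `switch_args`
/// together with the just-created continuation object.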
1679pub(crate) fn translate_switch<'a>(
1680    env: &mut crate::func_environ::FuncEnvironment<'a>,
1681    builder: &mut FunctionBuilder,
1682    tag_index: u32,
1683    switchee_contobj: ir::Value,
1684    switch_args: &[ir::Value],
1685    return_types: &[ir::Type],
1686) -> WasmResult<Vec<ir::Value>> {
1687    let vmctx = env.vmctx_val(&mut builder.cursor());
1688
1689    // Check and increment revision on switchee continuation object (i.e., the
1690    // one being switched to). Logically, the switchee continuation extends from
1691    // `switchee_contref` to `switchee_contref.last_ancestor` (i.e., the end of
1692    // the parent chain starting at `switchee_contref`).
1693    let switchee_contref = {
1694        let (witness, target_contref) =
1695            fatpointer::deconstruct(env, &mut builder.cursor(), switchee_contobj);
1696
1697        // The typing rules for switch allow a null reference to be passed to it.
1698        builder
1699            .ins()
1700            .trapz(target_contref, crate::TRAP_NULL_REFERENCE);
1701
1702        let mut target_contref = helpers::VMContRef::new(target_contref);
1703
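        // As with resume, the reference is single-use: trap if its revision
        // does not match the witness in the fat pointer.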
1704        let revision = target_contref.get_revision(env, builder);
1705        let evidence = builder.ins().icmp(IntCC::Equal, revision, witness);
1706        builder
1707            .ins()
1708            .trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
1709        let _next_revision = target_contref.incr_revision(env, builder, revision);
1710        target_contref
1711    };
1712
1713    // We create the "switcher continuation" (i.e., the one executing switch)
1714    // from the current execution context: Logically, it extends from the
1715    // continuation reference executing `switch` (subsequently called
1716    // `switcher_contref`) to the immediate child (called
1717    // `switcher_contref_last_ancestor`) of the stack with the corresponding
1718    // handler (saved in `handler_stack_chain`).
1719    let (
1720        switcher_contref,
1721        switcher_contobj,
1722        switcher_contref_last_ancestor,
1723        handler_stack_chain,
1724        vm_runtime_limits_ptr,
1725    ) = {
1726        let tag_addr = tag_address(env, builder, tag_index);
1727        let active_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
1728        let (handler_stack_chain, last_ancestor, _handler_index) =
1729            search_handler(env, builder, &active_stack_chain, tag_addr, false);
1730        let mut last_ancestor = helpers::VMContRef::new(last_ancestor);
1731
1732        // If we get here, the search_handler logic succeeded (i.e., did not trap).
1733        // Thus, there is at least one parent, so we are not on the initial stack.
        // We can therefore extract the continuation directly.
1735        let switcher_contref = active_stack_chain.unchecked_get_continuation();
1736        let mut switcher_contref = helpers::VMContRef::new(switcher_contref);
1737
1738        switcher_contref.set_last_ancestor(env, builder, last_ancestor.address);
1739
        // In the switcher_contref's `values` buffer, stack-allocate enough
        // room so that we can later store `return_types.len()` values when
        // this continuation is resumed.
1742        let values = switcher_contref.values(env, builder);
1743        let required_capacity = u32::try_from(return_types.len()).unwrap();
1744        if required_capacity > 0 {
1745            env.stack_switching_values_buffer = Some(values.allocate_or_reuse_stack_slot(
1746                env,
1747                builder,
1748                required_capacity,
1749                env.stack_switching_values_buffer,
1750            ));
1751        }
1752
1753        let switcher_contref_csi = switcher_contref.common_stack_information(env, builder);
1754        switcher_contref_csi.set_state_suspended(env, builder);
1755        // We break off `switcher_contref` from the chain of active
1756        // continuations, by separating the link between `last_ancestor` and its
1757        // parent stack.
1758        let absent = VMStackChain::absent(env, builder);
1759        last_ancestor.set_parent_stack_chain(env, builder, &absent);
1760
1761        // Load current runtime limits from `VMContext` and store in the
1762        // switcher continuation.
1763        let vm_runtime_limits_ptr = vmctx_load_vm_runtime_limits_ptr(env, builder, vmctx);
1764        switcher_contref_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, false);
1765
1766        let revision = switcher_contref.get_revision(env, builder);
1767        let new_contobj = fatpointer::construct(
1768            env,
1769            &mut builder.cursor(),
1770            revision,
1771            switcher_contref.address,
1772        );
1773
1774        (
1775            switcher_contref,
1776            new_contobj,
1777            last_ancestor,
1778            handler_stack_chain,
1779            vm_runtime_limits_ptr,
1780        )
1781    };
1782
1783    // Prepare switchee continuation:
1784    // - Store "ordinary" switch arguments as well as the contobj just
1785    //   synthesized from the current context (i.e., `switcher_contobj`) in the
1786    //   switchee continuation's payload buffer.
1787    // - Splice switchee's continuation chain with handler stack to form new
1788    //   overall chain of active continuations.
1789    let (switchee_contref_csi, switchee_contref_last_ancestor) = {
1790        let mut combined_payloads = switch_args.to_vec();
1791        combined_payloads.push(switcher_contobj);
1792        vmcontref_store_payloads(env, builder, &combined_payloads, switchee_contref.address);
1793
1794        let switchee_contref_csi = switchee_contref.common_stack_information(env, builder);
1795        switchee_contref_csi.set_state_running(env, builder);
1796
1797        let switchee_contref_last_ancestor = switchee_contref.get_last_ancestor(env, builder);
1798        let mut switchee_contref_last_ancestor =
1799            helpers::VMContRef::new(switchee_contref_last_ancestor);
1800
1801        switchee_contref_last_ancestor.set_parent_stack_chain(env, builder, &handler_stack_chain);
1802
1803        (switchee_contref_csi, switchee_contref_last_ancestor)
1804    };
1805
1806    // Update VMContext/Store: Update active continuation and `VMRuntimeLimits`.
1807    {
1808        vmctx_set_active_continuation(env, builder, vmctx, switchee_contref.address);
1809
1810        switchee_contref_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
1811    }
1812
1813    // Perform actual stack switch
1814    {
1815        let switcher_last_ancestor_fs =
1816            switcher_contref_last_ancestor.get_fiber_stack(env, builder);
1817        let switcher_last_ancestor_cc =
1818            switcher_last_ancestor_fs.load_control_context(env, builder);
1819
1820        let switchee_last_ancestor_fs =
1821            switchee_contref_last_ancestor.get_fiber_stack(env, builder);
1822        let switchee_last_ancestor_cc =
1823            switchee_last_ancestor_fs.load_control_context(env, builder);
1824
1825        // The stack switch involves the following control contexts (e.g., IP,
1826        // SP, FP, ...):
1827        // - `switchee_last_ancestor_cc` contains the information to continue
1828        //    execution in the switchee/target continuation.
1829        // - `switcher_last_ancestor_cc` contains the information about how to
1830        //    continue execution once we suspend/return to the stack with the
1831        //    switch handler.
1832        //
1833        // In total, the following needs to happen:
1834        // 1. Load control context at `switchee_last_ancestor_cc` to perform
1835        //    stack switch.
1836        // 2. Move control context at `switcher_last_ancestor_cc` over to
1837        //    `switchee_last_ancestor_cc`.
1838        // 3. Upon actual switch, save current control context at
1839        //    `switcher_last_ancestor_cc`.
1840        //
1841        // We implement this as follows:
1842        // 1. We copy `switchee_last_ancestor_cc` to a temporary area on the
1843        //    stack (`tmp_control_context`).
1844        // 2. We copy `switcher_last_ancestor_cc` over to
1845        //    `switchee_last_ancestor_cc`.
1846        // 3. We invoke the stack switch instruction such that it reads from the
1847        //    temporary area, and writes to `switcher_last_ancestor_cc`.
1848        //
1849        // Note that the temporary area is only accessed once by the
1850        // `stack_switch` instruction emitted later in this block, meaning that we
1851        // don't have to worry about its lifetime.
1852        //
1853        // NOTE(frank-emrich) The implementation below results in one stack slot
1854        // being created per switch instruction, even though multiple switch
1855        // instructions in the same function could safely re-use the same stack
1856        // slot. Thus, we could implement logic for sharing the stack slot by
1857        // adding an appropriate field to `FuncEnvironment`.
1858        //
1859        // NOTE(frank-emrich) We could avoid the copying to a temporary area by
1860        // making `stack_switch` do all of the necessary moving itself. However,
1861        // that would be a rather ad-hoc change to how the instruction uses the
1862        // two pointers given to it.
1863
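        // Number of bytes in a control context on the target platform.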
1864        let cctx_size = control_context_size(env.isa().triple())?;
        let slot_data = ir::StackSlotData::new(
            ir::StackSlotKind::ExplicitSlot,
            u32::from(cctx_size),
            u8::try_from(env.pointer_type().bytes()).unwrap(),
        );
        let slot = builder.create_sized_stack_slot(slot_data);
1871        let tmp_control_context = builder.ins().stack_addr(env.pointer_type(), slot, 0);
1872
1873        let flags = MemFlags::trusted();
1874        let mut offset: i32 = 0;
1875        while offset < i32::from(cctx_size) {
1876            // switchee_last_ancestor_cc -> tmp control context
1877            let tmp1 =
1878                builder
1879                    .ins()
1880                    .load(env.pointer_type(), flags, switchee_last_ancestor_cc, offset);
1881            builder
1882                .ins()
1883                .store(flags, tmp1, tmp_control_context, offset);
1884
1885            // switcher_last_ancestor_cc -> switchee_last_ancestor_cc
1886            let tmp2 =
1887                builder
1888                    .ins()
1889                    .load(env.pointer_type(), flags, switcher_last_ancestor_cc, offset);
1890            builder
1891                .ins()
1892                .store(flags, tmp2, switchee_last_ancestor_cc, offset);
1893
1894            offset += i32::try_from(env.pointer_type().bytes()).unwrap();
1895        }
1896
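        // The payload encodes that this transfer of control is a switch (as
        // opposed to an ordinary resume).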
1897        let switch_payload = ControlEffect::encode_switch(builder).to_u64();
1898
1899        let _result = builder.ins().stack_switch(
1900            switcher_last_ancestor_cc,
1901            tmp_control_context,
1902            switch_payload,
1903        );
1904    }
1905
1906    // After switching back to the original stack: Load return values, they are
1907    // stored on the switcher continuation.
1908    let return_values = {
1909        let payloads = switcher_contref.values(env, builder);
1910        let return_values = payloads.load_data_entries(env, builder, return_types);
1911        // We consume the values and discard the buffer (allocated on this stack)
1912        payloads.clear(env, builder, true);
1913        return_values
1914    };
1915
1916    Ok(return_values)
1917}