1use cranelift_codegen::ir::BlockArg;
2use itertools::{Either, Itertools};
3
4use cranelift_codegen::ir::condcodes::*;
5use cranelift_codegen::ir::types::*;
6use cranelift_codegen::ir::{self, MemFlags};
7use cranelift_codegen::ir::{Block, BlockCall, InstBuilder, JumpTableData};
8use cranelift_frontend::FunctionBuilder;
9use wasmtime_environ::{PtrSize, TagIndex, TypeIndex, WasmResult, WasmValType, wasm_unsupported};
10
11fn control_context_size(triple: &target_lexicon::Triple) -> WasmResult<u8> {
12 match (triple.architecture, triple.operating_system) {
13 (target_lexicon::Architecture::X86_64, target_lexicon::OperatingSystem::Linux) => Ok(24),
14 _ => Err(wasm_unsupported!(
15 "stack switching not supported on {triple}"
16 )),
17 }
18}
19
20use super::control_effect::ControlEffect;
21use super::fatpointer;
22
23pub(crate) mod stack_switching_helpers {
25 use core::marker::PhantomData;
26 use cranelift_codegen::ir;
27 use cranelift_codegen::ir::InstBuilder;
28 use cranelift_codegen::ir::condcodes::IntCC;
29 use cranelift_codegen::ir::types::*;
30 use cranelift_codegen::ir::{StackSlot, StackSlotKind::*};
31 use cranelift_frontend::FunctionBuilder;
32 use wasmtime_environ::PtrSize;
33
    /// Describes the memory layout of one entry of a `VMHostArray` for a
    /// given entry type.
    pub(crate) trait VMHostArrayEntry {
        /// Returns `(alignment, entry_size)` in bytes for a single entry,
        /// given the target's pointer-size description `p`.
        fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(p: &P) -> (u8, u32);
    }

    /// 128-bit payload entries: fixed 16-byte alignment and 16-byte size,
    /// independent of the target pointer size.
    impl VMHostArrayEntry for u128 {
        fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(_p: &P) -> (u8, u32) {
            (16, 16)
        }
    }

    /// Pointer entries: alignment and size both equal the target's pointer
    /// size.
    impl<T> VMHostArrayEntry for *mut T {
        fn vmhostarray_entry_layout<P: wasmtime_environ::PtrSize>(p: &P) -> (u8, u32) {
            (p.size(), p.size().into())
        }
    }
52
    /// Compile-time handle to a runtime `VMContRef` allocation: holds the
    /// IR value that is the continuation reference's address.
    #[derive(Copy, Clone)]
    pub struct VMContRef {
        pub address: ir::Value,
    }

    /// Compile-time handle to a runtime `VMHostArray` whose entry layout is
    /// determined by `T` via the `VMHostArrayEntry` trait.
    #[derive(Copy, Clone)]
    pub struct VMHostArrayRef<T> {
        // Address of the array header (data pointer, length, capacity).
        address: ir::Value,

        // Zero-sized marker tying this handle to its entry type `T`.
        phantom: PhantomData<T>,
    }

    /// Array of 128-bit stack-switching payload slots.
    pub type VMPayloads = VMHostArrayRef<u128>;

    /// Array of handler entries, stored as pointer-sized values
    /// (tag addresses — see `search_handler`'s tag comparison).
    pub type VMHandlerList = VMHostArrayRef<*mut u8>;

    /// In-IR representation of a runtime `VMStackChain` value: a
    /// pointer-sized discriminant plus a pointer-sized payload
    /// (see the `debug_assert_eq!` in the constructors).
    pub struct VMStackChain {
        discriminant: ir::Value,
        payload: ir::Value,
    }

    /// Handle to a stack's `VMCommonStackInformation` block.
    pub struct VMCommonStackInformation {
        pub address: ir::Value,
    }

    /// Handle to a continuation's fiber stack: stores the address of the
    /// field holding the stack's top-of-stack pointer, not the pointer
    /// itself.
    pub struct VMContinuationStack {
        tos_ptr: ir::Value,
    }
93
94 impl VMContRef {
95 pub fn new(address: ir::Value) -> VMContRef {
96 VMContRef { address }
97 }
98
99 pub fn args<'a>(
100 &self,
101 env: &mut crate::func_environ::FuncEnvironment<'a>,
102 builder: &mut FunctionBuilder,
103 ) -> VMPayloads {
104 let offset: i64 = env.offsets.ptr.vmcontref_args().into();
105 let address = builder.ins().iadd_imm(self.address, offset);
106 VMPayloads::new(address)
107 }
108
109 pub fn values<'a>(
110 &self,
111 env: &mut crate::func_environ::FuncEnvironment<'a>,
112 builder: &mut FunctionBuilder,
113 ) -> VMPayloads {
114 let offset: i64 = env.offsets.ptr.vmcontref_values().into();
115 let address = builder.ins().iadd_imm(self.address, offset);
116 VMPayloads::new(address)
117 }
118
119 pub fn common_stack_information<'a>(
120 &self,
121 env: &mut crate::func_environ::FuncEnvironment<'a>,
122 builder: &mut FunctionBuilder,
123 ) -> VMCommonStackInformation {
124 let offset: i64 = env.offsets.ptr.vmcontref_common_stack_information().into();
125 let address = builder.ins().iadd_imm(self.address, offset);
126 VMCommonStackInformation { address }
127 }
128
129 pub fn set_parent_stack_chain<'a>(
133 &mut self,
134 env: &mut crate::func_environ::FuncEnvironment<'a>,
135 builder: &mut FunctionBuilder,
136 new_stack_chain: &VMStackChain,
137 ) {
138 let offset = env.offsets.ptr.vmcontref_parent_chain().into();
139 new_stack_chain.store(env, builder, self.address, offset)
140 }
141
142 pub fn get_parent_stack_chain<'a>(
146 &self,
147 env: &mut crate::func_environ::FuncEnvironment<'a>,
148 builder: &mut FunctionBuilder,
149 ) -> VMStackChain {
150 let offset = env.offsets.ptr.vmcontref_parent_chain().into();
151 VMStackChain::load(env, builder, self.address, offset, env.pointer_type())
152 }
153
154 pub fn set_last_ancestor<'a>(
155 &self,
156 env: &mut crate::func_environ::FuncEnvironment<'a>,
157 builder: &mut FunctionBuilder,
158 last_ancestor: ir::Value,
159 ) {
160 let offset: i32 = env.offsets.ptr.vmcontref_last_ancestor().into();
161 let mem_flags = ir::MemFlags::trusted();
162 builder
163 .ins()
164 .store(mem_flags, last_ancestor, self.address, offset);
165 }
166
167 pub fn get_last_ancestor<'a>(
168 &self,
169 env: &mut crate::func_environ::FuncEnvironment<'a>,
170 builder: &mut FunctionBuilder,
171 ) -> ir::Value {
172 let offset: i32 = env.offsets.ptr.vmcontref_last_ancestor().into();
173 let mem_flags = ir::MemFlags::trusted();
174 builder
175 .ins()
176 .load(env.pointer_type(), mem_flags, self.address, offset)
177 }
178
179 pub fn get_revision<'a>(
182 &mut self,
183 env: &mut crate::func_environ::FuncEnvironment<'a>,
184 builder: &mut FunctionBuilder,
185 ) -> ir::Value {
186 let mem_flags = ir::MemFlags::trusted();
187 let offset: i32 = env.offsets.ptr.vmcontref_revision().into();
188 let revision = builder.ins().load(I64, mem_flags, self.address, offset);
189 revision
190 }
191
192 pub fn incr_revision<'a>(
196 &mut self,
197 env: &mut crate::func_environ::FuncEnvironment<'a>,
198 builder: &mut FunctionBuilder,
199 revision: ir::Value,
200 ) -> ir::Value {
201 let mem_flags = ir::MemFlags::trusted();
202 let offset: i32 = env.offsets.ptr.vmcontref_revision().into();
203 let revision_plus1 = builder.ins().iadd_imm(revision, 1);
204 builder
205 .ins()
206 .store(mem_flags, revision_plus1, self.address, offset);
207 revision_plus1
208 }
209
210 pub fn get_fiber_stack<'a>(
211 &self,
212 env: &mut crate::func_environ::FuncEnvironment<'a>,
213 builder: &mut FunctionBuilder,
214 ) -> VMContinuationStack {
215 let offset: i64 = env.offsets.ptr.vmcontref_stack().into();
217 let fiber_stack_top_of_stack_ptr = builder.ins().iadd_imm(self.address, offset);
218 VMContinuationStack::new(fiber_stack_top_of_stack_ptr)
219 }
220 }
221
222 impl<T: VMHostArrayEntry> VMHostArrayRef<T> {
223 pub(crate) fn new(address: ir::Value) -> Self {
224 Self {
225 address,
226 phantom: PhantomData::default(),
227 }
228 }
229
230 fn get(&self, builder: &mut FunctionBuilder, ty: ir::Type, offset: i32) -> ir::Value {
231 let mem_flags = ir::MemFlags::trusted();
232 builder.ins().load(ty, mem_flags, self.address, offset)
233 }
234
235 fn set<U>(&self, builder: &mut FunctionBuilder, offset: i32, value: ir::Value) {
236 debug_assert_eq!(
237 builder.func.dfg.value_type(value),
238 Type::int_with_byte_size(u16::try_from(core::mem::size_of::<U>()).unwrap())
239 .unwrap()
240 );
241 let mem_flags = ir::MemFlags::trusted();
242 builder.ins().store(mem_flags, value, self.address, offset);
243 }
244
245 pub fn get_data<'a>(
246 &self,
247 env: &mut crate::func_environ::FuncEnvironment<'a>,
248 builder: &mut FunctionBuilder,
249 ) -> ir::Value {
250 let offset = env.offsets.ptr.vmhostarray_data().into();
251 self.get(builder, env.pointer_type(), offset)
252 }
253
254 pub fn get_length<'a>(
255 &self,
256 env: &mut crate::func_environ::FuncEnvironment<'a>,
257 builder: &mut FunctionBuilder,
258 ) -> ir::Value {
259 let offset = env.offsets.ptr.vmhostarray_length().into();
261 self.get(builder, I32, offset)
262 }
263
264 fn set_length<'a>(
265 &self,
266 env: &mut crate::func_environ::FuncEnvironment<'a>,
267 builder: &mut FunctionBuilder,
268 length: ir::Value,
269 ) {
270 let offset = env.offsets.ptr.vmhostarray_length().into();
272 self.set::<u32>(builder, offset, length);
273 }
274
275 fn set_capacity<'a>(
276 &self,
277 env: &mut crate::func_environ::FuncEnvironment<'a>,
278 builder: &mut FunctionBuilder,
279 capacity: ir::Value,
280 ) {
281 let offset = env.offsets.ptr.vmhostarray_capacity().into();
283 self.set::<u32>(builder, offset, capacity);
284 }
285
286 fn set_data<'a>(
287 &self,
288 env: &mut crate::func_environ::FuncEnvironment<'a>,
289 builder: &mut FunctionBuilder,
290 data: ir::Value,
291 ) {
292 debug_assert_eq!(builder.func.dfg.value_type(data), env.pointer_type());
293 let offset: i32 = env.offsets.ptr.vmhostarray_data().into();
294 let mem_flags = ir::MemFlags::trusted();
295 builder.ins().store(mem_flags, data, self.address, offset);
296 }
297
298 pub fn occupy_next_slots<'a>(
301 &self,
302 env: &mut crate::func_environ::FuncEnvironment<'a>,
303 builder: &mut FunctionBuilder,
304 arg_count: i32,
305 ) -> ir::Value {
306 let data = self.get_data(env, builder);
307 let original_length = self.get_length(env, builder);
308 let new_length = builder
309 .ins()
310 .iadd_imm(original_length, i64::from(arg_count));
311 self.set_length(env, builder, new_length);
312
313 let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
314 let original_length = builder.ins().uextend(I64, original_length);
315 let byte_offset = builder
316 .ins()
317 .imul_imm(original_length, i64::from(entry_size));
318 builder.ins().iadd(data, byte_offset)
319 }
320
321 pub fn allocate_or_reuse_stack_slot<'a>(
322 &self,
323 env: &mut crate::func_environ::FuncEnvironment<'a>,
324 builder: &mut FunctionBuilder,
325 required_capacity: u32,
326 existing_slot: Option<StackSlot>,
327 ) -> StackSlot {
328 let (align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
329 let required_size = required_capacity * entry_size;
330
331 match existing_slot {
332 Some(slot) if builder.func.sized_stack_slots[slot].size >= required_size => {
333 let slot_data = &builder.func.sized_stack_slots[slot];
334 debug_assert!(align <= slot_data.align_shift);
335 debug_assert_eq!(slot_data.kind, ExplicitSlot);
336 let existing_capacity = slot_data.size / entry_size;
337
338 let capacity_value = builder.ins().iconst(I32, i64::from(existing_capacity));
339 let existing_data = builder.ins().stack_addr(env.pointer_type(), slot, 0);
340
341 self.set_capacity(env, builder, capacity_value);
342 self.set_data(env, builder, existing_data);
343
344 slot
345 }
346 _ => {
347 let capacity_value = builder.ins().iconst(I32, i64::from(required_capacity));
348 let slot_size = ir::StackSlotData::new(
349 ir::StackSlotKind::ExplicitSlot,
350 required_size,
351 align,
352 );
353 let slot = builder.create_sized_stack_slot(slot_size);
354 let new_data = builder.ins().stack_addr(env.pointer_type(), slot, 0);
355
356 self.set_capacity(env, builder, capacity_value);
357 self.set_data(env, builder, new_data);
358
359 slot
360 }
361 }
362 }
363
364 pub fn load_data_entries<'a>(
368 &self,
369 env: &mut crate::func_environ::FuncEnvironment<'a>,
370 builder: &mut FunctionBuilder,
371 load_types: &[ir::Type],
372 ) -> Vec<ir::Value> {
373 let memflags = ir::MemFlags::trusted();
374
375 let data_start_pointer = self.get_data(env, builder);
376 let mut values = vec![];
377 let mut offset = 0;
378 let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
379 for valtype in load_types {
380 let val = builder
381 .ins()
382 .load(*valtype, memflags, data_start_pointer, offset);
383 values.push(val);
384 offset += i32::try_from(entry_size).unwrap();
385 }
386 values
387 }
388
389 pub fn store_data_entries<'a>(
394 &self,
395 env: &mut crate::func_environ::FuncEnvironment<'a>,
396 builder: &mut FunctionBuilder,
397 values: &[ir::Value],
398 ) {
399 let store_count = builder
400 .ins()
401 .iconst(I32, i64::try_from(values.len()).unwrap());
402
403 let (_align, entry_size) = T::vmhostarray_entry_layout(&env.offsets.ptr);
404
405 debug_assert!(values.iter().all(|val| {
406 let ty = builder.func.dfg.value_type(*val);
407 let size = ty.bytes();
408 size <= entry_size
409 }));
410
411 let memflags = ir::MemFlags::trusted();
412
413 let data_start_pointer = self.get_data(env, builder);
414
415 let mut offset = 0;
416 for value in values {
417 builder
418 .ins()
419 .store(memflags, *value, data_start_pointer, offset);
420 offset += i32::try_from(entry_size).unwrap();
421 }
422
423 self.set_length(env, builder, store_count);
424 }
425
426 pub fn clear<'a>(
427 &self,
428 env: &mut crate::func_environ::FuncEnvironment<'a>,
429 builder: &mut FunctionBuilder,
430 discard_buffer: bool,
431 ) {
432 let zero32 = builder.ins().iconst(I32, 0);
433 self.set_length(env, builder, zero32);
434
435 if discard_buffer {
436 let zero32 = builder.ins().iconst(I32, 0);
437 self.set_capacity(env, builder, zero32);
438
439 let zero_ptr = builder.ins().iconst(env.pointer_type(), 0);
440 self.set_data(env, builder, zero_ptr);
441 }
442 }
443 }
444
    impl VMStackChain {
        /// Builds the `Continuation(contref)` variant of the chain.
        pub fn from_continuation<'a>(
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            contref: ir::Value,
        ) -> VMStackChain {
            // The runtime layout must be exactly two pointer-sized words:
            // discriminant followed by payload.
            debug_assert_eq!(
                env.offsets.ptr.size_of_vmstack_chain(),
                2 * env.offsets.ptr.size()
            );
            let discriminant = wasmtime_environ::STACK_CHAIN_CONTINUATION_DISCRIMINANT;
            let discriminant = builder
                .ins()
                .iconst(env.pointer_type(), i64::try_from(discriminant).unwrap());
            VMStackChain {
                discriminant,
                payload: contref,
            }
        }

        /// Builds the `Absent` variant; the payload word is zero-filled.
        pub fn absent<'a>(
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMStackChain {
            // Same two-word layout invariant as `from_continuation`.
            debug_assert_eq!(
                env.offsets.ptr.size_of_vmstack_chain(),
                2 * env.offsets.ptr.size()
            );
            let discriminant = wasmtime_environ::STACK_CHAIN_ABSENT_DISCRIMINANT;
            let discriminant = builder
                .ins()
                .iconst(env.pointer_type(), i64::try_from(discriminant).unwrap());
            let zero_filler = builder.ins().iconst(env.pointer_type(), 0i64);
            VMStackChain {
                discriminant,
                payload: zero_filler,
            }
        }

        /// Emits a test of whether this chain denotes the initial stack
        /// (discriminant comparison only).
        pub fn is_initial_stack<'a>(
            &self,
            _env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            builder.ins().icmp_imm(
                IntCC::Equal,
                self.discriminant,
                i64::try_from(wasmtime_environ::STACK_CHAIN_INITIAL_STACK_DISCRIMINANT).unwrap(),
            )
        }

        /// Returns `[discriminant, payload]`, the in-memory word order.
        pub fn to_raw_parts(&self) -> [ir::Value; 2] {
            [self.discriminant, self.payload]
        }

        /// Reassembles a chain from the word order produced by
        /// `to_raw_parts`.
        pub fn from_raw_parts(raw_data: [ir::Value; 2]) -> VMStackChain {
            VMStackChain {
                discriminant: raw_data[0],
                payload: raw_data[1],
            }
        }

        /// Loads the two chain words from `pointer + initial_offset`,
        /// consecutive at `pointer_type`-sized strides.
        pub fn load<'a>(
            _env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            pointer: ir::Value,
            initial_offset: i32,
            pointer_type: ir::Type,
        ) -> VMStackChain {
            let memflags = ir::MemFlags::trusted();
            let mut offset = initial_offset;
            let mut data = vec![];
            for _ in 0..2 {
                data.push(builder.ins().load(pointer_type, memflags, pointer, offset));
                offset += i32::try_from(pointer_type.bytes()).unwrap();
            }
            let data = <[ir::Value; 2]>::try_from(data).unwrap();
            Self::from_raw_parts(data)
        }

        /// Stores the two chain words to `target_pointer + initial_offset`,
        /// the inverse of `load`.
        pub fn store<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            target_pointer: ir::Value,
            initial_offset: i32,
        ) {
            let memflags = ir::MemFlags::trusted();
            let mut offset = initial_offset;
            let data = self.to_raw_parts();

            for value in data {
                debug_assert_eq!(builder.func.dfg.value_type(value), env.pointer_type());
                builder.ins().store(memflags, value, target_pointer, offset);
                offset += i32::try_from(env.pointer_type().bytes()).unwrap();
            }
        }

        /// Returns the payload as a continuation pointer WITHOUT checking
        /// the discriminant; only valid if the caller knows this chain is a
        /// `Continuation`.
        pub fn unchecked_get_continuation(&self) -> ir::Value {
            self.payload
        }

        /// Returns the common stack information of the stack this chain
        /// refers to, interpreting the payload as its base address.
        pub fn get_common_stack_information<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            _builder: &mut FunctionBuilder,
        ) -> VMCommonStackInformation {
            let address = self.payload;

            // Relies on the CSI living at offset 0 of the pointed-to object,
            // so the payload can be used as the CSI address directly.
            debug_assert_eq!(env.offsets.ptr.vmcontref_common_stack_information(), 0);
            VMCommonStackInformation { address }
        }
    }
580
    impl VMCommonStackInformation {
        /// Address of the CSI's state (discriminant) field.
        fn get_state_ptr<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let offset: i64 = env.offsets.ptr.vmcommon_stack_information_state().into();

            builder.ins().iadd_imm(self.address, offset)
        }

        /// Address of the CSI's embedded `VMStackLimits` block.
        fn get_stack_limits_ptr<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let offset: i64 = env.offsets.ptr.vmcommon_stack_information_limits().into();

            builder.ins().iadd_imm(self.address, offset)
        }

        /// Loads the 32-bit state discriminant.
        fn load_state<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let mem_flags = ir::MemFlags::trusted();
            let state_ptr = self.get_state_ptr(env, builder);

            builder.ins().load(I32, mem_flags, state_ptr, 0)
        }

        /// Stores a bare state discriminant (for states without a payload).
        fn set_state_no_payload<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            discriminant: u32,
        ) {
            let discriminant = builder.ins().iconst(I32, i64::from(discriminant));
            let mem_flags = ir::MemFlags::trusted();
            let state_ptr = self.get_state_ptr(env, builder);

            builder.ins().store(mem_flags, discriminant, state_ptr, 0);
        }

        /// Marks the stack as `Running`.
        pub fn set_state_running<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_RUNNING_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        /// Marks the stack as `Parent` (a child is currently running).
        pub fn set_state_parent<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_PARENT_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        /// Marks the stack as `Returned`.
        pub fn set_state_returned<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_RETURNED_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        /// Marks the stack as `Suspended`.
        pub fn set_state_suspended<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) {
            let discriminant = wasmtime_environ::STACK_STATE_SUSPENDED_DISCRIMINANT;
            self.set_state_no_payload(env, builder, discriminant);
        }

        /// Emits a check for whether the stack has left the `Fresh` state,
        /// i.e. whether it was ever invoked.
        pub fn was_invoked<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let actual_state = self.load_state(env, builder);
            let allocated = wasmtime_environ::STACK_STATE_FRESH_DISCRIMINANT;
            builder
                .ins()
                .icmp_imm(IntCC::NotEqual, actual_state, i64::from(allocated))
        }

        /// Returns the CSI's handler list (tag addresses installed by
        /// resume instructions on this stack).
        pub fn get_handler_list<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> VMHandlerList {
            let offset: i64 = env.offsets.ptr.vmcommon_stack_information_handlers().into();
            let address = builder.ins().iadd_imm(self.address, offset);
            VMHandlerList::new(address)
        }

        /// Loads the index separating suspend handlers (before it) from
        /// switch handlers (from it onward) in the handler list.
        pub fn get_first_switch_handler_index<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let memflags = ir::MemFlags::trusted();
            let offset: i32 = env
                .offsets
                .ptr
                .vmcommon_stack_information_first_switch_handler_index()
                .into();
            builder.ins().load(I32, memflags, self.address, offset)
        }

        /// Stores the first-switch-handler index (see the getter above).
        pub fn set_first_switch_handler_index<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            value: ir::Value,
        ) {
            let memflags = ir::MemFlags::trusted();
            let offset: i32 = env
                .offsets
                .ptr
                .vmcommon_stack_information_first_switch_handler_index()
                .into();
            builder.ins().store(memflags, value, self.address, offset);
        }

        /// Copies the stack limit and last-wasm-entry-FP from this CSI's
        /// `VMStackLimits` into the store context pointed to by
        /// `vmruntime_limits_ptr`.
        pub fn write_limits_to_vmcontext<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            vmruntime_limits_ptr: ir::Value,
        ) {
            let stack_limits_ptr = self.get_stack_limits_ptr(env, builder);

            let memflags = ir::MemFlags::trusted();

            // Copies one pointer-sized field from our limits block into the
            // runtime-limits structure.
            let mut copy_to_vm_runtime_limits = |our_offset, their_offset| {
                let our_value = builder.ins().load(
                    env.pointer_type(),
                    memflags,
                    stack_limits_ptr,
                    i32::from(our_offset),
                );
                builder.ins().store(
                    memflags,
                    our_value,
                    vmruntime_limits_ptr,
                    i32::from(their_offset),
                );
            };

            let pointer_size = u8::try_from(env.pointer_type().bytes()).unwrap();
            let stack_limit_offset = env.offsets.ptr.vmstack_limits_stack_limit();
            let last_wasm_entry_fp_offset = env.offsets.ptr.vmstack_limits_last_wasm_entry_fp();
            copy_to_vm_runtime_limits(
                stack_limit_offset,
                pointer_size.vmstore_context_stack_limit(),
            );
            copy_to_vm_runtime_limits(
                last_wasm_entry_fp_offset,
                pointer_size.vmstore_context_last_wasm_entry_fp(),
            );
        }

        /// Inverse of `write_limits_to_vmcontext`: copies the
        /// last-wasm-entry-FP (and optionally the stack limit) from the
        /// structure at `vmruntime_limits_ptr` back into this CSI's limits.
        pub fn load_limits_from_vmcontext<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
            vmruntime_limits_ptr: ir::Value,
            load_stack_limit: bool,
        ) {
            let stack_limits_ptr = self.get_stack_limits_ptr(env, builder);

            let memflags = ir::MemFlags::trusted();
            let pointer_size = u8::try_from(env.pointer_type().bytes()).unwrap();

            // Copies one pointer-sized field from the runtime-limits
            // structure into our limits block.
            let mut copy = |runtime_limits_offset, stack_limits_offset| {
                let from_vm_runtime_limits = builder.ins().load(
                    env.pointer_type(),
                    memflags,
                    vmruntime_limits_ptr,
                    runtime_limits_offset,
                );
                builder.ins().store(
                    memflags,
                    from_vm_runtime_limits,
                    stack_limits_ptr,
                    stack_limits_offset,
                );
            };

            let last_wasm_entry_fp_offset = env.offsets.ptr.vmstack_limits_last_wasm_entry_fp();
            copy(
                pointer_size.vmstore_context_last_wasm_entry_fp(),
                last_wasm_entry_fp_offset,
            );

            if load_stack_limit {
                let stack_limit_offset = env.offsets.ptr.vmstack_limits_stack_limit();
                copy(
                    pointer_size.vmstore_context_stack_limit(),
                    stack_limit_offset,
                );
            }
        }
    }
805
    impl VMContinuationStack {
        /// `tos_ptr` is the address of the field that stores the stack's
        /// top-of-stack pointer (not the top of stack itself).
        pub fn new(tos_ptr: ir::Value) -> Self {
            Self { tos_ptr }
        }

        /// Loads the current top-of-stack pointer.
        fn load_top_of_stack<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let mem_flags = ir::MemFlags::trusted();
            builder
                .ins()
                .load(env.pointer_type(), mem_flags, self.tos_ptr, 0)
        }

        /// Returns the address of the control context stored just below the
        /// top of the stack.
        pub fn load_control_context<'a>(
            &self,
            env: &mut crate::func_environ::FuncEnvironment<'a>,
            builder: &mut FunctionBuilder,
        ) -> ir::Value {
            let tos = self.load_top_of_stack(env, builder);
            // NOTE(review): -0x18 (24 bytes) matches the value
            // `control_context_size` reports for x86-64 Linux — confirm the
            // two stay in sync if more targets gain stack-switching support.
            builder.ins().iadd_imm(tos, -0x18)
        }
    }
837}
838
839use helpers::VMStackChain;
840use stack_switching_helpers as helpers;
841
842pub(crate) fn vmcontref_store_payloads<'a>(
846 env: &mut crate::func_environ::FuncEnvironment<'a>,
847 builder: &mut FunctionBuilder,
848 values: &[ir::Value],
849 contref: ir::Value,
850) {
851 let count =
852 i32::try_from(values.len()).expect("Number of stack switching payloads should fit in i32");
853 if values.len() > 0 {
854 let use_args_block = builder.create_block();
855 let use_payloads_block = builder.create_block();
856 let store_data_block = builder.create_block();
857 builder.append_block_param(store_data_block, env.pointer_type());
858
859 let co = helpers::VMContRef::new(contref);
860 let csi = co.common_stack_information(env, builder);
861 let was_invoked = csi.was_invoked(env, builder);
862 builder
863 .ins()
864 .brif(was_invoked, use_payloads_block, &[], use_args_block, &[]);
865
866 {
867 builder.switch_to_block(use_args_block);
868 builder.seal_block(use_args_block);
869
870 let args = co.args(env, builder);
871 let ptr = args.occupy_next_slots(env, builder, count);
872
873 builder
874 .ins()
875 .jump(store_data_block, &[BlockArg::Value(ptr)]);
876 }
877
878 {
879 builder.switch_to_block(use_payloads_block);
880 builder.seal_block(use_payloads_block);
881
882 let payloads = co.values(env, builder);
883
884 let ptr = payloads.occupy_next_slots(env, builder, count);
887 builder
888 .ins()
889 .jump(store_data_block, &[BlockArg::Value(ptr)]);
890 }
891
892 {
893 builder.switch_to_block(store_data_block);
894 builder.seal_block(store_data_block);
895
896 let ptr = builder.block_params(store_data_block)[0];
897
898 let memflags = ir::MemFlags::trusted();
900 let mut offset = 0;
901 for value in values {
902 builder.ins().store(memflags, *value, ptr, offset);
903 offset += i32::from(env.offsets.ptr.maximum_value_size());
904 }
905 }
906 }
907}
908
909pub(crate) fn tag_address<'a>(
910 env: &mut crate::func_environ::FuncEnvironment<'a>,
911 builder: &mut FunctionBuilder,
912 index: u32,
913) -> ir::Value {
914 let vmctx = env.vmctx_val(&mut builder.cursor());
915 let tag_index = wasmtime_environ::TagIndex::from_u32(index);
916 let pointer_type = env.pointer_type();
917 if let Some(def_index) = env.module.defined_tag_index(tag_index) {
918 let offset = i32::try_from(env.offsets.vmctx_vmtag_definition(def_index)).unwrap();
919 builder.ins().iadd_imm(vmctx, i64::from(offset))
920 } else {
921 let offset = i32::try_from(env.offsets.vmctx_vmtag_import_from(tag_index)).unwrap();
922 builder.ins().load(
923 pointer_type,
924 ir::MemFlags::trusted().with_readonly(),
925 vmctx,
926 ir::immediates::Offset32::new(offset),
927 )
928 }
929}
930
931pub fn vmctx_load_stack_chain<'a>(
935 env: &mut crate::func_environ::FuncEnvironment<'a>,
936 builder: &mut FunctionBuilder,
937 vmctx: ir::Value,
938) -> VMStackChain {
939 let stack_chain_offset = env.offsets.ptr.vmstore_context_stack_chain().into();
940
941 let vm_store_context_offset = env.offsets.ptr.vmctx_store_context();
943 let vm_store_context = builder.ins().load(
944 env.pointer_type(),
945 MemFlags::trusted(),
946 vmctx,
947 vm_store_context_offset,
948 );
949
950 VMStackChain::load(
951 env,
952 builder,
953 vm_store_context,
954 stack_chain_offset,
955 env.pointer_type(),
956 )
957}
958
959pub fn vmctx_store_stack_chain<'a>(
962 env: &mut crate::func_environ::FuncEnvironment<'a>,
963 builder: &mut FunctionBuilder,
964 vmctx: ir::Value,
965 stack_chain: &VMStackChain,
966) {
967 let stack_chain_offset = env.offsets.ptr.vmstore_context_stack_chain().into();
968
969 let vm_store_context_offset = env.offsets.ptr.vmctx_store_context();
971 let vm_store_context = builder.ins().load(
972 env.pointer_type(),
973 MemFlags::trusted(),
974 vmctx,
975 vm_store_context_offset,
976 );
977
978 stack_chain.store(env, builder, vm_store_context, stack_chain_offset)
979}
980
981pub fn vmctx_set_active_continuation<'a>(
984 env: &mut crate::func_environ::FuncEnvironment<'a>,
985 builder: &mut FunctionBuilder,
986 vmctx: ir::Value,
987 contref: ir::Value,
988) {
989 let chain = VMStackChain::from_continuation(env, builder, contref);
990 vmctx_store_stack_chain(env, builder, vmctx, &chain)
991}
992
993pub fn vmctx_load_vm_runtime_limits_ptr<'a>(
994 env: &mut crate::func_environ::FuncEnvironment<'a>,
995 builder: &mut FunctionBuilder,
996 vmctx: ir::Value,
997) -> ir::Value {
998 let pointer_type = env.pointer_type();
999 let offset = i32::from(env.offsets.ptr.vmctx_store_context());
1000
1001 let flags = ir::MemFlags::trusted().with_readonly();
1004
1005 builder.ins().load(pointer_type, flags, vmctx, offset)
1006}
1007
/// Walks the stack chain starting at `start`, looking for a handler for
/// `tag_address`. For each continuation in the chain, its *parent's*
/// handler list is scanned: the suspend-handler range `[0,
/// first_switch_handler_index)` when `search_suspend_handlers` is set, the
/// switch-handler range `[first_switch_handler_index, length)` otherwise.
/// Traps with `TRAP_UNHANDLED_TAG` if the initial stack is reached without
/// a match. On success, leaves the builder positioned in the match block
/// and returns `(parent chain of the matching continuation, address of the
/// matching continuation, index of the matching handler)`.
fn search_handler<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    start: &helpers::VMStackChain,
    tag_address: ir::Value,
    search_suspend_handlers: bool,
) -> (VMStackChain, ir::Value, ir::Value) {
    // CFG: handle_link -> (on_no_match | begin_search_handler_list ->
    // try_index -> (compare_tags -> (on_match | try_index) | handle_link)).
    let handle_link = builder.create_block();
    let begin_search_handler_list = builder.create_block();
    let try_index = builder.create_block();
    let compare_tags = builder.create_block();
    let on_match = builder.create_block();
    let on_no_match = builder.create_block();
    // The chain is passed between blocks as its two raw words.
    let block_args = start.to_raw_parts().map(|v| BlockArg::Value(v));

    builder.ins().jump(handle_link, &block_args);

    // Loop header over the chain: stop (trap) at the initial stack,
    // otherwise search this link's handlers.
    let chain_link = {
        builder.append_block_param(handle_link, env.pointer_type());
        builder.append_block_param(handle_link, env.pointer_type());
        builder.switch_to_block(handle_link);

        let raw_parts = builder.block_params(handle_link);
        let chain_link = helpers::VMStackChain::from_raw_parts([raw_parts[0], raw_parts[1]]);
        let is_initial_stack = chain_link.is_initial_stack(env, builder);
        builder.ins().brif(
            is_initial_stack,
            on_no_match,
            &[],
            begin_search_handler_list,
            &[],
        );
        chain_link
    };

    // Set up the scan over the parent's handler list and compute the index
    // range to search.
    let (contref, parent_link, handler_list_data_ptr, end_range) = {
        builder.switch_to_block(begin_search_handler_list);
        // Safe: the initial-stack case branched away above, so this link is
        // a continuation.
        let contref = chain_link.unchecked_get_continuation();
        let contref = helpers::VMContRef::new(contref);

        let parent_link = contref.get_parent_stack_chain(env, builder);
        let parent_csi = parent_link.get_common_stack_information(env, builder);

        let handlers = parent_csi.get_handler_list(env, builder);
        let handler_list_data_ptr = handlers.get_data(env, builder);

        let first_switch_handler_index = parent_csi.get_first_switch_handler_index(env, builder);

        // Suspend handlers occupy [0, first_switch_handler_index); switch
        // handlers occupy [first_switch_handler_index, length).
        let (begin_range, end_range) = if search_suspend_handlers {
            let zero = builder.ins().iconst(I32, 0);
            (zero, first_switch_handler_index)
        } else {
            let length = handlers.get_length(env, builder);
            (first_switch_handler_index, length)
        };

        builder
            .ins()
            .jump(try_index, &[BlockArg::Value(begin_range)]);

        (contref, parent_link, handler_list_data_ptr, end_range)
    };

    // Inner loop header over handler indices; when the range is exhausted,
    // continue the outer loop with the parent chain.
    let index = {
        builder.append_block_param(try_index, I32);
        builder.switch_to_block(try_index);
        let index = builder.block_params(try_index)[0];

        let in_bounds = builder
            .ins()
            .icmp(IntCC::UnsignedLessThan, index, end_range);
        let block_args = parent_link.to_raw_parts().map(|v| BlockArg::Value(v));
        builder
            .ins()
            .brif(in_bounds, compare_tags, &[], handle_link, &block_args);
        index
    };

    // Compare the handler entry at `index` against the searched tag.
    {
        builder.switch_to_block(compare_tags);

        let base = handler_list_data_ptr;
        // Handler entries are pointer-sized (see `VMHandlerList`).
        let entry_size = env.pointer_type().bytes();
        let offset = builder.ins().imul_imm(index, i64::from(entry_size));
        let offset = builder.ins().uextend(I64, offset);
        let entry_address = builder.ins().iadd(base, offset);

        let memflags = ir::MemFlags::trusted();

        let handled_tag = builder
            .ins()
            .load(env.pointer_type(), memflags, entry_address, 0);

        let tags_match = builder.ins().icmp(IntCC::Equal, handled_tag, tag_address);
        let incremented_index = builder.ins().iadd_imm(index, 1);
        builder.ins().brif(
            tags_match,
            on_match,
            &[],
            try_index,
            &[BlockArg::Value(incremented_index)],
        );
    }

    // Initial stack reached without a handler: unrecoverable.
    {
        builder.switch_to_block(on_no_match);
        builder.set_cold_block(on_no_match);
        builder.ins().trap(crate::TRAP_UNHANDLED_TAG);
    }

    // All predecessors are known now; seal everything.
    builder.seal_block(handle_link);
    builder.seal_block(begin_search_handler_list);
    builder.seal_block(try_index);
    builder.seal_block(compare_tags);
    builder.seal_block(on_match);
    builder.seal_block(on_no_match);

    // Leave the builder in the match block for the caller to continue.
    builder.switch_to_block(on_match);

    (parent_link, contref.address, index)
}
1180
1181pub(crate) fn translate_cont_bind<'a>(
1182 env: &mut crate::func_environ::FuncEnvironment<'a>,
1183 builder: &mut FunctionBuilder,
1184 contobj: ir::Value,
1185 args: &[ir::Value],
1186) -> ir::Value {
1187 let (witness, contref) = fatpointer::deconstruct(env, &mut builder.cursor(), contobj);
1188
1189 builder.ins().trapz(contref, crate::TRAP_NULL_REFERENCE);
1191
1192 let mut vmcontref = helpers::VMContRef::new(contref);
1193 let revision = vmcontref.get_revision(env, builder);
1194 let evidence = builder.ins().icmp(IntCC::Equal, witness, revision);
1195 builder
1196 .ins()
1197 .trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
1198
1199 vmcontref_store_payloads(env, builder, args, contref);
1200
1201 let revision = vmcontref.incr_revision(env, builder, revision);
1202 let contobj = fatpointer::construct(env, &mut builder.cursor(), revision, contref);
1203 contobj
1204}
1205
1206pub(crate) fn translate_cont_new<'a>(
1207 env: &mut crate::func_environ::FuncEnvironment<'a>,
1208 builder: &mut FunctionBuilder,
1209 func: ir::Value,
1210 arg_types: &[WasmValType],
1211 return_types: &[WasmValType],
1212) -> WasmResult<ir::Value> {
1213 builder.ins().trapz(func, crate::TRAP_NULL_REFERENCE);
1215
1216 let nargs = builder
1217 .ins()
1218 .iconst(I32, i64::try_from(arg_types.len()).unwrap());
1219 let nreturns = builder
1220 .ins()
1221 .iconst(I32, i64::try_from(return_types.len()).unwrap());
1222
1223 let cont_new_func = super::builtins::cont_new(env, &mut builder.func)?;
1224 let vmctx = env.vmctx_val(&mut builder.cursor());
1225 let call_inst = builder
1226 .ins()
1227 .call(cont_new_func, &[vmctx, func, nargs, nreturns]);
1228 let contref = *builder.func.dfg.inst_results(call_inst).first().unwrap();
1229
1230 let tag = helpers::VMContRef::new(contref).get_revision(env, builder);
1231 let contobj = fatpointer::construct(env, &mut builder.cursor(), tag, contref);
1232 Ok(contobj)
1233}
1234
/// Translates a `resume` instruction: switches to the continuation
/// `resume_contobj`, passing `resume_args` as payloads and installing the
/// handlers described by `resumetable` while it runs.
///
/// If the continuation runs to completion, its return values (per
/// continuation type `type_index`) are returned from this function's
/// generated code. If it suspends to one of the handlers, control is
/// dispatched via a jump table to the corresponding target block from
/// `resumetable`.
pub(crate) fn translate_resume<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    type_index: u32,
    resume_contobj: ir::Value,
    resume_args: &[ir::Value],
    resumetable: &[(u32, Option<ir::Block>)],
) -> WasmResult<Vec<ir::Value>> {
    // Block layout: `resume_block` performs the actual stack switch; after
    // the switch returns we branch either to `suspend_block` (continuation
    // suspended) or `return_block` (continuation returned). `dispatch_block`
    // routes a suspension to the matching handler preamble.
    let resume_block = builder.create_block();
    let return_block = builder.create_block();
    let suspend_block = builder.create_block();
    let dispatch_block = builder.create_block();

    let vmctx = env.vmctx_val(&mut builder.cursor());

    // Split the resume table: entries with a target block are suspend
    // handlers (`on $tag $block`); entries without one are switch tags.
    let (suspend_handlers, switch_tags): (Vec<(u32, Block)>, Vec<u32>) = resumetable
        .iter()
        .partition_map(|(tag_index, block_opt)| match block_opt {
            Some(block) => Either::Left((*tag_index, *block)),
            None => Either::Right(*tag_index),
        });

    builder.ins().jump(resume_block, &[]);

    let (resume_result, vm_runtime_limits_ptr, original_stack_chain, new_stack_chain) = {
        builder.switch_to_block(resume_block);
        builder.seal_block(resume_block);

        // Split the fat pointer into revision witness and raw VMContRef.
        let (witness, resume_contref) =
            fatpointer::deconstruct(env, &mut builder.cursor(), resume_contobj);

        builder
            .ins()
            .trapz(resume_contref, crate::TRAP_NULL_REFERENCE);

        let mut vmcontref = helpers::VMContRef::new(resume_contref);

        // Linearity check: trap if this continuation object was already
        // consumed (witness != current revision), then bump the revision.
        let revision = vmcontref.get_revision(env, builder);
        let evidence = builder.ins().icmp(IntCC::Equal, revision, witness);
        builder
            .ins()
            .trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
        let _next_revision = vmcontref.incr_revision(env, builder, revision);

        if resume_args.len() > 0 {
            // Hand the resume arguments to the continuation via its payload
            // buffer.
            vmcontref_store_payloads(env, builder, resume_args, resume_contref);
        }

        // Link the resumed chain underneath the current one: the parent
        // pointer of the continuation's last ancestor becomes the stack
        // chain that is active right now.
        let mut last_ancestor = helpers::VMContRef::new(vmcontref.get_last_ancestor(env, builder));

        let original_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
        last_ancestor.set_parent_stack_chain(env, builder, &original_stack_chain);

        // Null out the `last_ancestor` field of the resumed continuation.
        let zero = builder.ins().iconst(env.pointer_type(), 0);
        vmcontref.set_last_ancestor(env, builder, zero);

        vmctx_set_active_continuation(env, builder, vmctx, resume_contref);

        // Update the state fields: the resumed continuation is now running,
        // the current stack becomes a parent.
        let resume_contref = helpers::VMContRef::new(resume_contref);
        let resume_csi = resume_contref.common_stack_information(env, builder);
        let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
        resume_csi.set_state_running(env, builder);
        parent_csi.set_state_parent(env, builder);

        // Swap stack limits: save the current limits into the parent's CSI
        // and install the resumed continuation's limits in the vmcontext.
        let vm_runtime_limits_ptr = vmctx_load_vm_runtime_limits_ptr(env, builder, vmctx);
        parent_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, true);
        resume_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);

        let handler_list = parent_csi.get_handler_list(env, builder);

        if resumetable.len() > 0 {
            // Populate the parent's handler list with the addresses of all
            // handled tags: suspend handlers first, then switch tags, with
            // the index of the first switch handler recorded in the CSI.
            let handler_count = u32::try_from(resumetable.len()).unwrap();
            env.stack_switching_handler_list_buffer =
                Some(handler_list.allocate_or_reuse_stack_slot(
                    env,
                    builder,
                    handler_count,
                    env.stack_switching_handler_list_buffer,
                ));

            let suspend_handler_count = suspend_handlers.len();

            let all_handlers = suspend_handlers
                .iter()
                .map(|(tag_index, _block)| *tag_index)
                .chain(switch_tags);

            let all_tag_addresses: Vec<ir::Value> = all_handlers
                .map(|tag_index| tag_address(env, builder, tag_index))
                .collect();

            handler_list.store_data_entries(env, builder, &all_tag_addresses);

            let first_switch_handler_index = builder
                .ins()
                .iconst(I32, i64::try_from(suspend_handler_count).unwrap());
            parent_csi.set_first_switch_handler_index(env, builder, first_switch_handler_index);
        }

        let resume_payload = ControlEffect::encode_resume(builder).to_u64();

        // Perform the actual switch: save the current context into, and load
        // the target context from, the last ancestor's control context.
        let fiber_stack = last_ancestor.get_fiber_stack(env, builder);
        let control_context_ptr = fiber_stack.load_control_context(env, builder);

        let result =
            builder
                .ins()
                .stack_switch(control_context_ptr, control_context_ptr, resume_payload);

        // Control returns here once the resumed continuation suspends or
        // returns. The stack chain in the vmcontext now describes the chain
        // we came back from.
        let new_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);

        // Reinstate the original chain as active and mark it running again.
        vmctx_store_stack_chain(env, builder, vmctx, &original_stack_chain);
        parent_csi.set_state_running(env, builder);

        // The handler list is only live during the resume; clear it.
        handler_list.clear(env, builder, true);
        parent_csi.set_first_switch_handler_index(env, builder, zero);

        // Decode the control effect: a non-zero signal means the
        // continuation suspended, zero means it returned.
        let result = ControlEffect::from_u64(result);
        let signal = result.signal(builder);

        builder
            .ins()
            .brif(signal, suspend_block, &[], return_block, &[]);

        (
            result,
            vm_runtime_limits_ptr,
            original_stack_chain,
            new_stack_chain,
        )
    };

    // Suspend path: a handler of ours matched; extract the suspended
    // continuation and the handler index, then dispatch to the handler.
    let (handler_index, suspended_contref, suspended_contobj) = {
        builder.switch_to_block(suspend_block);
        builder.seal_block(suspend_block);

        let suspended_continuation = new_stack_chain.unchecked_get_continuation();
        let mut suspended_continuation = helpers::VMContRef::new(suspended_continuation);
        let suspended_csi = suspended_continuation.common_stack_information(env, builder);

        // Save the suspended continuation's limits and restore our own.
        suspended_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, false);

        let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
        parent_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);

        let handler_index = resume_result.handler_index(builder);

        // Build a new fat pointer for the suspended continuation so the
        // handler can resume it later.
        let revision = suspended_continuation.get_revision(env, builder);
        let suspended_contobj = fatpointer::construct(
            env,
            &mut builder.cursor(),
            revision,
            suspended_continuation.address,
        );

        builder.ins().jump(dispatch_block, &[]);

        (handler_index, suspended_continuation, suspended_contobj)
    };

    // Jump-table default: `search_handler` already guaranteed a match, so
    // reaching the default entry is a compiler bug.
    let jt_default_block = builder.create_block();
    {
        builder.switch_to_block(jt_default_block);
        builder.set_cold_block(jt_default_block);

        builder.ins().trap(crate::TRAP_UNREACHABLE);
    }

    // One preamble block per suspend handler: load the tag's payloads from
    // the suspended continuation, append the continuation object, and jump
    // to the handler's target block.
    let target_preamble_blocks = {
        let mut preamble_blocks = vec![];

        for &(handle_tag, target_block) in &suspend_handlers {
            let preamble_block = builder.create_block();
            preamble_blocks.push(preamble_block);
            builder.switch_to_block(preamble_block);

            let param_types = env.tag_params(TagIndex::from_u32(handle_tag));
            let param_types: Vec<ir::Type> = param_types
                .iter()
                .map(|wty| crate::value_type(env.isa(), *wty))
                .collect();

            let values = suspended_contref.values(env, builder);
            let mut suspend_args: Vec<BlockArg> = values
                .load_data_entries(env, builder, &param_types)
                .into_iter()
                .map(|v| BlockArg::Value(v))
                .collect();

            // The last block argument is the continuation object itself.
            suspend_args.push(BlockArg::Value(suspended_contobj));

            values.clear(env, builder, false);

            builder.ins().jump(target_block, &suspend_args);
        }

        preamble_blocks
    };

    // Dispatch: branch on the handler index through a jump table whose
    // entries are the preamble blocks built above.
    {
        builder.switch_to_block(dispatch_block);
        builder.seal_block(dispatch_block);

        let default_bc = builder.func.dfg.block_call(jt_default_block, &[]);

        let adapter_bcs: Vec<BlockCall> = target_preamble_blocks
            .iter()
            .map(|b| builder.func.dfg.block_call(*b, &[]))
            .collect();

        let jt_data = JumpTableData::new(default_bc, &adapter_bcs);
        let jt = builder.create_jump_table(jt_data);

        builder.ins().br_table(handler_index, jt);

        for preamble_block in target_preamble_blocks {
            builder.seal_block(preamble_block);
        }
        builder.seal_block(jt_default_block);
    }

    // Return path: the continuation ran to completion; mark it returned,
    // restore our limits, and read its return values from the payload
    // buffer.
    {
        builder.switch_to_block(return_block);
        builder.seal_block(return_block);

        let returned_contref = new_stack_chain.unchecked_get_continuation();
        let returned_contref = helpers::VMContRef::new(returned_contref);

        let parent_csi = original_stack_chain.get_common_stack_information(env, builder);
        parent_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);

        let returned_csi = returned_contref.common_stack_information(env, builder);
        returned_csi.set_state_returned(env, builder);

        let return_types: Vec<_> = env
            .continuation_returns(TypeIndex::from_u32(type_index))
            .iter()
            .map(|ty| crate::value_type(env.isa(), *ty))
            .collect();
        let payloads = returned_contref.args(env, builder);
        let return_values = payloads.load_data_entries(env, builder, &return_types);
        payloads.clear(env, builder, true);

        Ok(return_values)
    }
}
1603
1604pub(crate) fn translate_suspend<'a>(
1605 env: &mut crate::func_environ::FuncEnvironment<'a>,
1606 builder: &mut FunctionBuilder,
1607 tag_index: u32,
1608 suspend_args: &[ir::Value],
1609 tag_return_types: &[ir::Type],
1610) -> Vec<ir::Value> {
1611 let tag_addr = tag_address(env, builder, tag_index);
1612
1613 let vmctx = env.vmctx_val(&mut builder.cursor());
1614 let active_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
1615
1616 let (_, end_of_chain_contref, handler_index) =
1617 search_handler(env, builder, &active_stack_chain, tag_addr, true);
1618
1619 let active_contref = active_stack_chain.unchecked_get_continuation();
1623 let active_contref = helpers::VMContRef::new(active_contref);
1624 let mut end_of_chain_contref = helpers::VMContRef::new(end_of_chain_contref);
1625
1626 active_contref.set_last_ancestor(env, builder, end_of_chain_contref.address);
1627
1628 let values = active_contref.values(env, builder);
1633 let required_capacity =
1634 u32::try_from(std::cmp::max(suspend_args.len(), tag_return_types.len()))
1635 .expect("Number of stack switching payloads should fit in u32");
1636
1637 if required_capacity > 0 {
1638 env.stack_switching_values_buffer = Some(values.allocate_or_reuse_stack_slot(
1639 env,
1640 builder,
1641 required_capacity,
1642 env.stack_switching_values_buffer,
1643 ));
1644 }
1645
1646 if suspend_args.len() > 0 {
1647 values.store_data_entries(env, builder, suspend_args);
1648 }
1649
1650 let active_contref_csi = active_contref.common_stack_information(env, builder);
1652 active_contref_csi.set_state_suspended(env, builder);
1653 let absent_chain_link = VMStackChain::absent(env, builder);
1654 end_of_chain_contref.set_parent_stack_chain(env, builder, &absent_chain_link);
1655
1656 let suspend_payload = ControlEffect::encode_suspend(builder, handler_index).to_u64();
1657
1658 let fiber_stack = end_of_chain_contref.get_fiber_stack(env, builder);
1664 let control_context_ptr = fiber_stack.load_control_context(env, builder);
1665
1666 builder
1667 .ins()
1668 .stack_switch(control_context_ptr, control_context_ptr, suspend_payload);
1669
1670 let values = active_contref.values(env, builder);
1672 let return_values = values.load_data_entries(env, builder, tag_return_types);
1673 values.clear(env, builder, true);
1675
1676 return_values
1677}
1678
/// Translates a `switch` instruction: suspends the currently running
/// (switcher) continuation and transfers control directly to the switchee
/// continuation `switchee_contobj`, passing `switch_args` plus a new
/// continuation object for the switcher as payloads.
///
/// When the switcher is eventually switched/resumed back to, the values of
/// types `return_types` are loaded from its payload buffer and returned.
pub(crate) fn translate_switch<'a>(
    env: &mut crate::func_environ::FuncEnvironment<'a>,
    builder: &mut FunctionBuilder,
    tag_index: u32,
    switchee_contobj: ir::Value,
    switch_args: &[ir::Value],
    return_types: &[ir::Type],
) -> WasmResult<Vec<ir::Value>> {
    let vmctx = env.vmctx_val(&mut builder.cursor());

    // Validate and unpack the switchee: null check, linearity (revision)
    // check, and revision bump, exactly as for `resume`.
    let switchee_contref = {
        let (witness, target_contref) =
            fatpointer::deconstruct(env, &mut builder.cursor(), switchee_contobj);

        builder
            .ins()
            .trapz(target_contref, crate::TRAP_NULL_REFERENCE);

        let mut target_contref = helpers::VMContRef::new(target_contref);

        let revision = target_contref.get_revision(env, builder);
        let evidence = builder.ins().icmp(IntCC::Equal, revision, witness);
        builder
            .ins()
            .trapz(evidence, crate::TRAP_CONTINUATION_ALREADY_CONSUMED);
        let _next_revision = target_contref.incr_revision(env, builder, revision);
        target_contref
    };

    // Suspend the currently active (switcher) continuation: find the
    // handler for the switch tag, detach the switcher's chain from it, and
    // build a fresh continuation object representing the switcher.
    let (
        switcher_contref,
        switcher_contobj,
        switcher_contref_last_ancestor,
        handler_stack_chain,
        vm_runtime_limits_ptr,
    ) = {
        let tag_addr = tag_address(env, builder, tag_index);
        let active_stack_chain = vmctx_load_stack_chain(env, builder, vmctx);
        // `false`: see `search_handler` for the meaning of this flag —
        // presumably it restricts the search to switch handlers; confirm
        // against its definition.
        let (handler_stack_chain, last_ancestor, _handler_index) =
            search_handler(env, builder, &active_stack_chain, tag_addr, false);
        let mut last_ancestor = helpers::VMContRef::new(last_ancestor);

        let switcher_contref = active_stack_chain.unchecked_get_continuation();
        let mut switcher_contref = helpers::VMContRef::new(switcher_contref);

        switcher_contref.set_last_ancestor(env, builder, last_ancestor.address);

        // Ensure the switcher's payload buffer can hold the values it will
        // receive when switched back to.
        let values = switcher_contref.values(env, builder);
        let required_capacity = u32::try_from(return_types.len()).unwrap();
        if required_capacity > 0 {
            env.stack_switching_values_buffer = Some(values.allocate_or_reuse_stack_slot(
                env,
                builder,
                required_capacity,
                env.stack_switching_values_buffer,
            ));
        }

        // Mark the switcher suspended and sever its chain from the handler.
        let switcher_contref_csi = switcher_contref.common_stack_information(env, builder);
        switcher_contref_csi.set_state_suspended(env, builder);
        let absent = VMStackChain::absent(env, builder);
        last_ancestor.set_parent_stack_chain(env, builder, &absent);

        // Save the current stack limits into the switcher's CSI.
        let vm_runtime_limits_ptr = vmctx_load_vm_runtime_limits_ptr(env, builder, vmctx);
        switcher_contref_csi.load_limits_from_vmcontext(env, builder, vm_runtime_limits_ptr, false);

        // Fat pointer the switchee will receive to switch/resume back.
        let revision = switcher_contref.get_revision(env, builder);
        let new_contobj = fatpointer::construct(
            env,
            &mut builder.cursor(),
            revision,
            switcher_contref.address,
        );

        (
            switcher_contref,
            new_contobj,
            last_ancestor,
            handler_stack_chain,
            vm_runtime_limits_ptr,
        )
    };

    // Prepare the switchee: hand over the payloads (switch args plus the
    // switcher's continuation object), mark it running, and splice its
    // ancestor chain onto the handler's stack chain.
    let (switchee_contref_csi, switchee_contref_last_ancestor) = {
        let mut combined_payloads = switch_args.to_vec();
        combined_payloads.push(switcher_contobj);
        vmcontref_store_payloads(env, builder, &combined_payloads, switchee_contref.address);

        let switchee_contref_csi = switchee_contref.common_stack_information(env, builder);
        switchee_contref_csi.set_state_running(env, builder);

        let switchee_contref_last_ancestor = switchee_contref.get_last_ancestor(env, builder);
        let mut switchee_contref_last_ancestor =
            helpers::VMContRef::new(switchee_contref_last_ancestor);

        switchee_contref_last_ancestor.set_parent_stack_chain(env, builder, &handler_stack_chain);

        (switchee_contref_csi, switchee_contref_last_ancestor)
    };

    // Make the switchee the active continuation and install its limits.
    {
        vmctx_set_active_continuation(env, builder, vmctx, switchee_contref.address);

        switchee_contref_csi.write_limits_to_vmcontext(env, builder, vm_runtime_limits_ptr);
    }

    // Perform the switch. The control contexts are shuffled through a
    // temporary stack slot: the switchee's context is copied to the
    // temporary, the switcher's context is copied into the switchee's slot,
    // and `stack_switch` then saves the current context into the switcher's
    // last-ancestor slot while loading the target context from the
    // temporary.
    {
        let switcher_last_ancestor_fs =
            switcher_contref_last_ancestor.get_fiber_stack(env, builder);
        let switcher_last_ancestor_cc =
            switcher_last_ancestor_fs.load_control_context(env, builder);

        let switchee_last_ancestor_fs =
            switchee_contref_last_ancestor.get_fiber_stack(env, builder);
        let switchee_last_ancestor_cc =
            switchee_last_ancestor_fs.load_control_context(env, builder);

        // Size is target-dependent; `control_context_size` errors out on
        // unsupported platforms.
        let cctx_size = control_context_size(env.isa().triple())?;
        let slot_size = ir::StackSlotData::new(
            ir::StackSlotKind::ExplicitSlot,
            u32::from(cctx_size),
            u8::try_from(env.pointer_type().bytes()).unwrap(),
        );
        let slot = builder.create_sized_stack_slot(slot_size);
        let tmp_control_context = builder.ins().stack_addr(env.pointer_type(), slot, 0);

        // Copy the contexts one pointer-sized word at a time.
        let flags = MemFlags::trusted();
        let mut offset: i32 = 0;
        while offset < i32::from(cctx_size) {
            let tmp1 =
                builder
                    .ins()
                    .load(env.pointer_type(), flags, switchee_last_ancestor_cc, offset);
            builder
                .ins()
                .store(flags, tmp1, tmp_control_context, offset);

            let tmp2 =
                builder
                    .ins()
                    .load(env.pointer_type(), flags, switcher_last_ancestor_cc, offset);
            builder
                .ins()
                .store(flags, tmp2, switchee_last_ancestor_cc, offset);

            offset += i32::try_from(env.pointer_type().bytes()).unwrap();
        }

        let switch_payload = ControlEffect::encode_switch(builder).to_u64();

        let _result = builder.ins().stack_switch(
            switcher_last_ancestor_cc,
            tmp_control_context,
            switch_payload,
        );
    }

    // Execution resumes here when someone switches/resumes back to the
    // switcher; its payload buffer then holds the return values.
    let return_values = {
        let payloads = switcher_contref.values(env, builder);
        let return_values = payloads.load_data_entries(env, builder, return_types);
        payloads.clear(env, builder, true);
        return_values
    };

    Ok(return_values)
}