wasmtime_internal_cranelift/func_environ.rs

mod gc;
pub(crate) mod stack_switching;

use crate::compiler::Compiler;
use crate::translate::{
    FuncTranslationStacks, GlobalVariable, Heap, HeapData, StructFieldsVec, TableData, TableSize,
    TargetEnvironment,
};
use crate::{BuiltinFunctionSignatures, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
use cranelift_codegen::ir::immediates::{Imm64, Offset32, V128Imm};
use cranelift_codegen::ir::pcc::Fact;
use cranelift_codegen::ir::{self, BlockArg, ExceptionTableData, ExceptionTableItem, types};
use cranelift_codegen::ir::{ArgumentPurpose, ConstantData, Function, InstBuilder, MemFlags};
use cranelift_codegen::ir::{Block, ExceptionTag, types::*};
use cranelift_codegen::isa::{TargetFrontendConfig, TargetIsa};
use cranelift_entity::packed_option::{PackedOption, ReservedValue};
use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap};
use cranelift_frontend::Variable;
use cranelift_frontend::{FuncInstBuilder, FunctionBuilder};
use smallvec::{SmallVec, smallvec};
use std::mem;
use wasmparser::{Operator, WasmFeatures};
use wasmtime_environ::{
    BuiltinFunctionIndex, DataIndex, DefinedFuncIndex, ElemIndex, EngineOrModuleTypeIndex,
    FuncIndex, FuncKey, GlobalIndex, IndexType, Memory, MemoryIndex, Module,
    ModuleInternedTypeIndex, ModuleTranslation, ModuleTypesBuilder, PtrSize, Table, TableIndex,
    TagIndex, TripleExt, Tunables, TypeConvert, TypeIndex, VMOffsets, WasmCompositeInnerType,
    WasmFuncType, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult, WasmValType,
};
use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK};
use wasmtime_math::f64_cvt_to_int_bounds;

#[derive(Debug)]
pub(crate) enum Extension {
    Sign,
    Zero,
}

/// A cache with an `Option<ir::FuncRef>` member for every builtin function,
/// used to de-duplicate declaring and looking up each builtin's function
/// reference.
pub(crate) struct BuiltinFunctions {
    types: BuiltinFunctionSignatures,

    builtins: [Option<ir::FuncRef>; BuiltinFunctionIndex::len() as usize],
}

impl BuiltinFunctions {
    fn new(compiler: &Compiler) -> Self {
        Self {
            types: BuiltinFunctionSignatures::new(compiler),
            builtins: [None; BuiltinFunctionIndex::len() as usize],
        }
    }

    fn load_builtin(&mut self, func: &mut Function, builtin: BuiltinFunctionIndex) -> ir::FuncRef {
        let cache = &mut self.builtins[builtin.index() as usize];
        if let Some(f) = cache {
            return *f;
        }
        let signature = func.import_signature(self.types.wasm_signature(builtin));
        let key = FuncKey::WasmToBuiltinTrampoline(builtin);
        let (namespace, index) = key.into_raw_parts();
        let name = ir::ExternalName::User(
            func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
        );
        let f = func.import_function(ir::ExtFuncData {
            name,
            signature,
            colocated: true,
        });
        *cache = Some(f);
        f
    }
}

// Generate a helper method on `BuiltinFunctions` for each named builtin
// function.
macro_rules! declare_function_signatures {
    ($(
        $( #[$attr:meta] )*
        $name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
    )*) => {
        $(impl BuiltinFunctions {
            $( #[$attr] )*
            pub(crate) fn $name(&mut self, func: &mut Function) -> ir::FuncRef {
                self.load_builtin(func, BuiltinFunctionIndex::$name())
            }
        })*
    };
}
wasmtime_environ::foreach_builtin_function!(declare_function_signatures);
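
// As an illustrative sketch (not part of the source), for a builtin named
// `table_copy` the macro invocation above expands to roughly:
//
//     impl BuiltinFunctions {
//         pub(crate) fn table_copy(&mut self, func: &mut Function) -> ir::FuncRef {
//             self.load_builtin(func, BuiltinFunctionIndex::table_copy())
//         }
//     }
//
// so each call site simply does `self.builtin_functions.table_copy(func)` and
// hits the `load_builtin` cache on repeated use.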

/// The `FuncEnvironment` implementation for use by the `ModuleEnvironment`.
pub struct FuncEnvironment<'module_environment> {
    compiler: &'module_environment Compiler,
    isa: &'module_environment (dyn TargetIsa + 'module_environment),
    pub(crate) module: &'module_environment Module,
    types: &'module_environment ModuleTypesBuilder,
    wasm_func_ty: &'module_environment WasmFuncType,
    sig_ref_to_ty: SecondaryMap<ir::SigRef, Option<&'module_environment WasmFuncType>>,
    needs_gc_heap: bool,
    entities: WasmEntities,

    #[cfg(feature = "gc")]
    ty_to_gc_layout: std::collections::HashMap<
        wasmtime_environ::ModuleInternedTypeIndex,
        wasmtime_environ::GcLayout,
    >,

    #[cfg(feature = "gc")]
    gc_heap: Option<Heap>,

    /// The Cranelift global holding the GC heap's base address.
    #[cfg(feature = "gc")]
    gc_heap_base: Option<ir::GlobalValue>,

    /// The Cranelift global holding the GC heap's bound.
    #[cfg(feature = "gc")]
    gc_heap_bound: Option<ir::GlobalValue>,

    translation: &'module_environment ModuleTranslation<'module_environment>,

    /// Heaps implementing WebAssembly linear memories.
    heaps: PrimaryMap<Heap, HeapData>,

    /// The Cranelift global holding the vmctx address.
    vmctx: Option<ir::GlobalValue>,

    /// The Cranelift global for our vmctx's `*mut VMStoreContext`.
    vm_store_context: Option<ir::GlobalValue>,

    /// The PCC memory type describing the vmctx layout, if we're
    /// using PCC.
    pcc_vmctx_memtype: Option<ir::MemoryType>,

    /// Caches of signatures for builtin functions.
    builtin_functions: BuiltinFunctions,

    /// Offsets to struct fields accessed by JIT code.
    pub(crate) offsets: VMOffsets<u8>,

    tunables: &'module_environment Tunables,

    /// A function-local variable which stores the cached value of the amount
    /// of fuel remaining to execute. When used this is modified frequently, so
    /// it's stored locally as a variable instead of always being referenced
    /// through the field in `*const VMStoreContext`.
    fuel_var: cranelift_frontend::Variable,

    /// A cached epoch deadline value, when performing epoch-based
    /// interruption. Loaded from `VMStoreContext` and reloaded after
    /// any yield.
    epoch_deadline_var: cranelift_frontend::Variable,

    /// A cached pointer to the per-Engine epoch counter, when
    /// performing epoch-based interruption. Initialized in the
    /// function prologue. We prefer to use a variable here rather
    /// than reload on each check because it's better to let the
    /// regalloc keep it in a register if able; if not, it can always
    /// spill, and this isn't any worse than reloading each time.
    epoch_ptr_var: cranelift_frontend::Variable,

    fuel_consumed: i64,

    /// A `GlobalValue` in CLIF which represents the stack limit.
    ///
    /// Typically this resides in the `stack_limit` value of `ir::Function`,
    /// but that requires signal handlers on the host; when those are disabled,
    /// the limit lives here and an explicit check is emitted instead. Note
    /// that the explicit check is always present even if this is a "leaf"
    /// function, as we have to call into the host to trap when signal
    /// handlers are disabled.
    pub(crate) stack_limit_at_function_entry: Option<ir::GlobalValue>,

    /// Used by the stack switching feature. If set, we have allocated a
    /// slot on this function's stack to be used for the
    /// current stack's `handler_list` field.
    stack_switching_handler_list_buffer: Option<ir::StackSlot>,

    /// Used by the stack switching feature. If set, we have allocated a
    /// slot on this function's stack to be used for the
    /// current continuation's `values` field.
    stack_switching_values_buffer: Option<ir::StackSlot>,
}

impl<'module_environment> FuncEnvironment<'module_environment> {
    pub fn new(
        compiler: &'module_environment Compiler,
        translation: &'module_environment ModuleTranslation<'module_environment>,
        types: &'module_environment ModuleTypesBuilder,
        wasm_func_ty: &'module_environment WasmFuncType,
    ) -> Self {
        let tunables = compiler.tunables();
        let builtin_functions = BuiltinFunctions::new(compiler);

        // This isn't used during translation, so silence the compiler's
        // warning about it being unused.
        let _ = BuiltinFunctions::raise;

        Self {
            isa: compiler.isa(),
            module: &translation.module,
            compiler,
            types,
            wasm_func_ty,
            sig_ref_to_ty: SecondaryMap::default(),
            needs_gc_heap: false,
            entities: WasmEntities::default(),

            #[cfg(feature = "gc")]
            ty_to_gc_layout: std::collections::HashMap::new(),
            #[cfg(feature = "gc")]
            gc_heap: None,
            #[cfg(feature = "gc")]
            gc_heap_base: None,
            #[cfg(feature = "gc")]
            gc_heap_bound: None,

            heaps: PrimaryMap::default(),
            vmctx: None,
            vm_store_context: None,
            pcc_vmctx_memtype: None,
            builtin_functions,
            offsets: VMOffsets::new(compiler.isa().pointer_bytes(), &translation.module),
            tunables,
            fuel_var: Variable::reserved_value(),
            epoch_deadline_var: Variable::reserved_value(),
            epoch_ptr_var: Variable::reserved_value(),

            // Start with at least one unit of fuel consumed because even
            // empty functions should consume at least some fuel.
            fuel_consumed: 1,

            translation,

            stack_limit_at_function_entry: None,

            stack_switching_handler_list_buffer: None,
            stack_switching_values_buffer: None,
        }
    }

    pub(crate) fn pointer_type(&self) -> ir::Type {
        self.isa.pointer_type()
    }

    pub(crate) fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue {
        self.vmctx.unwrap_or_else(|| {
            let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
            if self.isa.flags().enable_pcc() {
                // Create a placeholder memtype for the vmctx; we'll
                // add fields to it as we lazily create HeapData
                // structs and global values.
                let vmctx_memtype = func.create_memory_type(ir::MemoryTypeData::Struct {
                    size: 0,
                    fields: vec![],
                });

                self.pcc_vmctx_memtype = Some(vmctx_memtype);
                func.global_value_facts[vmctx] = Some(Fact::Mem {
                    ty: vmctx_memtype,
                    min_offset: 0,
                    max_offset: 0,
                    nullable: false,
                });
            }

            self.vmctx = Some(vmctx);
            vmctx
        })
    }

    pub(crate) fn vmctx_val(&mut self, pos: &mut FuncCursor<'_>) -> ir::Value {
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(&mut pos.func);
        pos.ins().global_value(pointer_type, vmctx)
    }

    fn get_table_copy_func(
        &mut self,
        func: &mut Function,
        dst_table_index: TableIndex,
        src_table_index: TableIndex,
    ) -> (ir::FuncRef, usize, usize) {
        let sig = self.builtin_functions.table_copy(func);
        (
            sig,
            dst_table_index.as_u32() as usize,
            src_table_index.as_u32() as usize,
        )
    }

    #[cfg(feature = "threads")]
    fn get_memory_atomic_wait(&mut self, func: &mut Function, ty: ir::Type) -> ir::FuncRef {
        match ty {
            I32 => self.builtin_functions.memory_atomic_wait32(func),
            I64 => self.builtin_functions.memory_atomic_wait64(func),
            x => panic!("get_memory_atomic_wait unsupported type: {x:?}"),
        }
    }

    fn get_global_location(
        &mut self,
        func: &mut ir::Function,
        index: GlobalIndex,
    ) -> (ir::GlobalValue, i32) {
        let pointer_type = self.pointer_type();
        let vmctx = self.vmctx(func);
        if let Some(def_index) = self.module.defined_global_index(index) {
            let offset = i32::try_from(self.offsets.vmctx_vmglobal_definition(def_index)).unwrap();
            (vmctx, offset)
        } else {
            let from_offset = self.offsets.vmctx_vmglobal_import_from(index);
            let global = func.create_global_value(ir::GlobalValueData::Load {
                base: vmctx,
                offset: Offset32::new(i32::try_from(from_offset).unwrap()),
                global_type: pointer_type,
                flags: MemFlags::trusted().with_readonly().with_can_move(),
            });
            (global, 0)
        }
    }

    /// Get or create the `ir::Global` for the `*mut VMStoreContext` in our
    /// `VMContext`.
    fn get_vmstore_context_ptr_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
        if let Some(ptr) = self.vm_store_context {
            return ptr;
        }

        let offset = self.offsets.ptr.vmctx_store_context();
        let base = self.vmctx(func);
        let ptr = func.create_global_value(ir::GlobalValueData::Load {
            base,
            offset: Offset32::new(offset.into()),
            global_type: self.pointer_type(),
            flags: ir::MemFlags::trusted().with_readonly().with_can_move(),
        });
        self.vm_store_context = Some(ptr);
        ptr
    }

    /// Get the `*mut VMStoreContext` value for our `VMContext`.
    fn get_vmstore_context_ptr(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_vmstore_context_ptr_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    fn fuel_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On function entry we load the amount of fuel into a function-local
        // `self.fuel_var` to make fuel modifications fast locally. This cache
        // is then periodically flushed to the Store-defined location in
        // `VMStoreContext` later.
        debug_assert!(self.fuel_var.is_reserved_value());
        self.fuel_var = builder.declare_var(ir::types::I64);
        self.fuel_load_into_var(builder);
        self.fuel_check(builder);
    }

    fn fuel_function_exit(&mut self, builder: &mut FunctionBuilder<'_>) {
        // On exiting the function we need to be sure to save the fuel we have
        // cached locally in `self.fuel_var` back into the Store-defined
        // location.
        self.fuel_save_from_var(builder);
    }

    fn fuel_before_op(
        &mut self,
        op: &Operator<'_>,
        builder: &mut FunctionBuilder<'_>,
        reachable: bool,
    ) {
        if !reachable {
            // In unreachable code we shouldn't have any leftover fuel we
            // haven't accounted for, since whatever made us unreachable
            // should already have added it to `self.fuel_var`.
            debug_assert_eq!(self.fuel_consumed, 0);
            return;
        }

        self.fuel_consumed += match op {
            // Nop and drop generate no code, so don't consume fuel for them.
            Operator::Nop | Operator::Drop => 0,

            // Control flow may create branches, but is generally cheap, so
            // don't consume fuel. Note that `if` is absent from this list
            // since some cost is incurred with its conditional check.
            Operator::Block { .. }
            | Operator::Loop { .. }
            | Operator::Unreachable
            | Operator::Return
            | Operator::Else
            | Operator::End => 0,

            // Everything else counts as one operation.
            _ => 1,
        };

        match op {
            // Exiting a function (via a return or unreachable) or otherwise
            // entering a different function (via a call) means that we need to
            // update the fuel consumption in `VMStoreContext` because we're
            // about to move control out of this function itself and the fuel
            // may need to be read.
            //
            // Before this we need to update the fuel counter from our own cost
            // leading up to this function call, and then we can store
            // `self.fuel_var` into `VMStoreContext`.
            Operator::Unreachable
            | Operator::Return
            | Operator::CallIndirect { .. }
            | Operator::Call { .. }
            | Operator::ReturnCall { .. }
            | Operator::ReturnCallRef { .. }
            | Operator::ReturnCallIndirect { .. }
            | Operator::Throw { .. }
            | Operator::ThrowRef => {
                self.fuel_increment_var(builder);
                self.fuel_save_from_var(builder);
            }

            // To ensure all code preceding a loop is only counted once we
            // update the fuel variable on entry.
            Operator::Loop { .. }

            // Entering into an `if` block means that the edge we take isn't
            // known until runtime, so we need to update our fuel consumption
            // before we take the branch.
            | Operator::If { .. }

            // Control-flow instructions mean that we're moving to the end/exit
            // of a block somewhere else. That means we need to update the fuel
            // counter since we're effectively terminating our basic block.
            | Operator::Br { .. }
            | Operator::BrIf { .. }
            | Operator::BrTable { .. }
            | Operator::BrOnNull { .. }
            | Operator::BrOnNonNull { .. }
            | Operator::BrOnCast { .. }
            | Operator::BrOnCastFail { .. }

            // Exiting a scope means that we need to update the fuel
            // consumption because there are multiple ways to exit a scope and
            // this is the only time we have to account for instructions
            // executed so far.
            | Operator::End

            // This is similar to `end`, except that it's only the terminator
            // for an `if` block. The same reasoning applies though in that we
            // are terminating a basic block and need to update the fuel
            // variable.
            | Operator::Else => self.fuel_increment_var(builder),

            // This is a normal instruction where the fuel is buffered to later
            // get added to `self.fuel_var`.
            //
            // Note that we generally ignore instructions which may trap and
            // therefore result in exiting a block early. Current usage of fuel
            // means that it's not too important to account for a precise amount
            // of fuel consumed but rather "close to the actual amount" is good
            // enough. For 100% precise counting, however, we'd probably need to
            // not only increment but also save the fuel amount more often
            // around trapping instructions. (see the `unreachable` instruction
            // case above)
            //
            // Note that `Block` is specifically omitted from incrementing the
            // fuel variable. Control flow entering a `block` is unconditional
            // which means it's effectively executing straight-line code. We'll
            // update the counter when exiting a block, but we shouldn't need to
            // do so upon entering a block.
            _ => {}
        }
    }
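
    // To illustrate the buffering above (an explanatory sketch, not part of
    // the source): for a Wasm sequence like
    //
    //     i32.add      ;; fuel_consumed += 1 (buffered, no code emitted)
    //     i32.mul      ;; fuel_consumed += 1 (buffered)
    //     br $label    ;; fuel_consumed += 1, then flush: fuel_var += 3
    //
    // only the `br` causes `fuel_increment_var` to emit a single `iadd_imm`
    // on `fuel_var`; straight-line instructions merely bump the compile-time
    // `fuel_consumed` counter.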

    fn fuel_after_op(&mut self, op: &Operator<'_>, builder: &mut FunctionBuilder<'_>) {
        // After a function call we need to reload our fuel value since the
        // function may have changed it.
        match op {
            Operator::Call { .. } | Operator::CallIndirect { .. } => {
                self.fuel_load_into_var(builder);
            }
            _ => {}
        }
    }

    /// Adds `self.fuel_consumed` to the `fuel_var`, zeroing out the amount of
    /// fuel consumed at that point.
    fn fuel_increment_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let consumption = mem::replace(&mut self.fuel_consumed, 0);
        if consumption == 0 {
            return;
        }

        let fuel = builder.use_var(self.fuel_var);
        let fuel = builder.ins().iadd_imm(fuel, consumption);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Loads the fuel consumption value from `VMStoreContext` into
    /// `self.fuel_var`.
    fn fuel_load_into_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset(builder);
        let fuel = builder
            .ins()
            .load(ir::types::I64, ir::MemFlags::trusted(), addr, offset);
        builder.def_var(self.fuel_var, fuel);
    }

    /// Stores the fuel consumption value from `self.fuel_var` into
    /// `VMStoreContext`.
    fn fuel_save_from_var(&mut self, builder: &mut FunctionBuilder<'_>) {
        let (addr, offset) = self.fuel_addr_offset(builder);
        let fuel_consumed = builder.use_var(self.fuel_var);
        builder
            .ins()
            .store(ir::MemFlags::trusted(), fuel_consumed, addr, offset);
    }

    /// Returns the `(address, offset)` of the fuel consumption within
    /// `VMStoreContext`, used to perform loads/stores later.
    fn fuel_addr_offset(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
    ) -> (ir::Value, ir::immediates::Offset32) {
        let vmstore_ctx = self.get_vmstore_context_ptr(builder);
        (
            vmstore_ctx,
            i32::from(self.offsets.ptr.vmstore_context_fuel_consumed()).into(),
        )
    }

    /// Checks the amount of fuel remaining, and if we've run out of fuel we
    /// call the out-of-fuel function.
    fn fuel_check(&mut self, builder: &mut FunctionBuilder) {
        self.fuel_increment_var(builder);
        let out_of_gas_block = builder.create_block();
        let continuation_block = builder.create_block();

        // Note that our fuel is encoded as adding positive values to a
        // negative number. Whenever the negative number goes positive that
        // means we ran out of fuel.
        //
        // Compare to see if our fuel is positive, and if so we ran out of gas.
        // Otherwise we can continue on like usual.
        let zero = builder.ins().iconst(ir::types::I64, 0);
        let fuel = builder.use_var(self.fuel_var);
        let cmp = builder
            .ins()
            .icmp(IntCC::SignedGreaterThanOrEqual, fuel, zero);
        builder
            .ins()
            .brif(cmp, out_of_gas_block, &[], continuation_block, &[]);
        builder.seal_block(out_of_gas_block);

        // If we ran out of gas then we call our out-of-gas intrinsic and it
        // figures out what to do. Note that this may raise a trap, or do
        // something like yield to an async runtime. In either case we make no
        // assumptions about what happens and simply handle the case where the
        // intrinsic returns.
        //
        // Note that we save/reload fuel around this since the out-of-gas
        // intrinsic may alter how much fuel is in the system.
        builder.switch_to_block(out_of_gas_block);
        self.fuel_save_from_var(builder);
        let out_of_gas = self.builtin_functions.out_of_gas(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(out_of_gas, &[vmctx]);
        self.fuel_load_into_var(builder);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }
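
    // The code emitted by `fuel_check` has roughly the following CLIF shape
    // (an illustrative sketch; value numbers and block names are invented):
    //
    //     v0 = iconst.i64 0
    //     v1 = <fuel_var>
    //     v2 = icmp sge v1, v0
    //     brif v2, out_of_gas_block, continuation_block
    //
    //   out_of_gas_block:
    //     <store fuel_var to VMStoreContext>
    //     call fn_out_of_gas(vmctx)
    //     <reload fuel_var from VMStoreContext>
    //     jump continuation_block
    //
    //   continuation_block:
    //     ...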

    fn epoch_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
        debug_assert!(self.epoch_deadline_var.is_reserved_value());
        self.epoch_deadline_var = builder.declare_var(ir::types::I64);
        // Let epoch_check_full load the current deadline and call def_var.

        debug_assert!(self.epoch_ptr_var.is_reserved_value());
        self.epoch_ptr_var = builder.declare_var(self.pointer_type());
        let epoch_ptr = self.epoch_ptr(builder);
        builder.def_var(self.epoch_ptr_var, epoch_ptr);

        // We must check for an epoch change when entering a
        // function. Why? Why aren't checks at loops sufficient to
        // bound runtime to O(|static program size|)?
        //
        // The reason is that one can construct a "zip-bomb-like"
        // program with exponential-in-program-size runtime, with no
        // backedges (loops), by building a tree of function calls: f0
        // calls f1 ten times, f1 calls f2 ten times, etc. E.g., nine
        // levels of this yields a billion function calls with no
        // backedges. So we can't do checks only at backedges.
        //
        // In this "call-tree" scenario, and in fact in any program
        // that uses calls as a sort of control flow to try to evade
        // backedge checks, a check at every function entry is
        // sufficient. Then, combined with checks at every backedge
        // (loop) the longest runtime between checks is bounded by the
        // straightline length of any function body.
        let continuation_block = builder.create_block();
        let cur_epoch_value = self.epoch_load_current(builder);
        self.epoch_check_full(builder, cur_epoch_value, continuation_block);
    }
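
    // A hypothetical sketch of the "call-tree" program described above, in
    // WebAssembly text format (illustrative only):
    //
    //     (func $f0 (call $f1) (call $f1) ...)  ;; ten calls to $f1
    //     (func $f1 (call $f2) (call $f2) ...)  ;; ten calls to $f2
    //     ;; ... nine levels deep => ~10^9 calls, zero backedges
    //
    // With entry checks, each of those calls performs one epoch check, so the
    // runtime between checks stays bounded even without any loops.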

    #[cfg(feature = "wmemcheck")]
    fn hook_malloc_exit(&mut self, builder: &mut FunctionBuilder, retvals: &[ir::Value]) {
        let check_malloc = self.builtin_functions.check_malloc(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let func_args = builder
            .func
            .dfg
            .block_params(builder.func.layout.entry_block().unwrap());
        let len = if func_args.len() < 3 {
            return;
        } else {
            // If a function named `malloc` has at least one Wasm argument, we
            // assume the first such argument (index 2, after the two vmctx
            // parameters) is the requested allocation size.
            func_args[2]
        };
        let retval = if retvals.len() < 1 {
            return;
        } else {
            retvals[0]
        };
        builder.ins().call(check_malloc, &[vmctx, retval, len]);
    }

    #[cfg(feature = "wmemcheck")]
    fn hook_free_exit(&mut self, builder: &mut FunctionBuilder) {
        let check_free = self.builtin_functions.check_free(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let func_args = builder
            .func
            .dfg
            .block_params(builder.func.layout.entry_block().unwrap());
        let ptr = if func_args.len() < 3 {
            return;
        } else {
            // If a function named `free` has at least one Wasm argument, we
            // assume the first such argument (index 2, after the two vmctx
            // parameters) is a pointer to memory.
            func_args[2]
        };
        builder.ins().call(check_free, &[vmctx, ptr]);
    }

    fn epoch_ptr(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let vmctx = self.vmctx(builder.func);
        let pointer_type = self.pointer_type();
        let base = builder.ins().global_value(pointer_type, vmctx);
        let offset = i32::from(self.offsets.ptr.vmctx_epoch_ptr());
        let epoch_ptr = builder
            .ins()
            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
        epoch_ptr
    }

    fn epoch_load_current(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
        let addr = builder.use_var(self.epoch_ptr_var);
        builder.ins().load(
            ir::types::I64,
            ir::MemFlags::trusted(),
            addr,
            ir::immediates::Offset32::new(0),
        )
    }

    fn epoch_check(&mut self, builder: &mut FunctionBuilder<'_>) {
        let continuation_block = builder.create_block();

        // Load new epoch and check against the cached deadline.
        let cur_epoch_value = self.epoch_load_current(builder);
        self.epoch_check_cached(builder, cur_epoch_value, continuation_block);

        // At this point we've noticed that the epoch has exceeded our
        // cached deadline. However the real deadline may have been
        // updated (within another yield) during some function that we
        // called in the meantime, so reload the cache and check again.
        self.epoch_check_full(builder, cur_epoch_value, continuation_block);
    }

    fn epoch_check_cached(
        &mut self,
        builder: &mut FunctionBuilder,
        cur_epoch_value: ir::Value,
        continuation_block: ir::Block,
    ) {
        let new_epoch_block = builder.create_block();
        builder.set_cold_block(new_epoch_block);

        let epoch_deadline = builder.use_var(self.epoch_deadline_var);
        let cmp = builder.ins().icmp(
            IntCC::UnsignedGreaterThanOrEqual,
            cur_epoch_value,
            epoch_deadline,
        );
        builder
            .ins()
            .brif(cmp, new_epoch_block, &[], continuation_block, &[]);
        builder.seal_block(new_epoch_block);

        builder.switch_to_block(new_epoch_block);
    }

    fn epoch_check_full(
        &mut self,
        builder: &mut FunctionBuilder,
        cur_epoch_value: ir::Value,
        continuation_block: ir::Block,
    ) {
        // We keep the deadline cached in a register to speed the checks
        // in the common case (between epoch ticks) but we want to do a
        // precise check here by reloading the cache first.
        let vmstore_ctx = self.get_vmstore_context_ptr(builder);
        let deadline = builder.ins().load(
            ir::types::I64,
            ir::MemFlags::trusted(),
            vmstore_ctx,
            ir::immediates::Offset32::new(self.offsets.ptr.vmstore_context_epoch_deadline() as i32),
        );
        builder.def_var(self.epoch_deadline_var, deadline);
        self.epoch_check_cached(builder, cur_epoch_value, continuation_block);

        let new_epoch = self.builtin_functions.new_epoch(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        // new_epoch() returns the new deadline, so we don't have to
        // reload it.
        let call = builder.ins().call(new_epoch, &[vmctx]);
        let new_deadline = *builder.func.dfg.inst_results(call).first().unwrap();
        builder.def_var(self.epoch_deadline_var, new_deadline);
        builder.ins().jump(continuation_block, &[]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
    }
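
    // The two-level structure above emits roughly the following shape for an
    // `epoch_check` (illustrative sketch; names are invented):
    //
    //     v0 = load.i64 <epoch counter>
    //     v1 = <cached epoch_deadline_var>
    //     brif (v0 >= v1), new_epoch_block_1, continuation_block
    //
    //   new_epoch_block_1:                           ;; cold
    //     v2 = load.i64 <deadline in VMStoreContext> ;; reload the real deadline
    //     brif (v0 >= v2), new_epoch_block_2, continuation_block
    //
    //   new_epoch_block_2:                           ;; cold
    //     v3 = call fn_new_epoch(vmctx)              ;; may yield or trap
    //     ;; cache v3 as the new deadline
    //     jump continuation_block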

    /// Get the Memory for the given index.
    fn memory(&self, index: MemoryIndex) -> Memory {
        self.module.memories[index]
    }

    /// Get the Table for the given index.
    fn table(&self, index: TableIndex) -> Table {
        self.module.tables[index]
    }

    /// Cast the value to I64, zero-extending if necessary.
    ///
    /// Returns the value cast to I64.
    fn cast_index_to_i64(
        &self,
        pos: &mut FuncCursor<'_>,
        val: ir::Value,
        index_type: IndexType,
    ) -> ir::Value {
        match index_type {
            IndexType::I32 => pos.ins().uextend(I64, val),
            IndexType::I64 => val,
        }
    }

    /// Convert the target pointer-sized integer `val` into the memory/table's index type.
    ///
    /// For memories, `val` holds a memory length (or the `-1` `memory.grow`-failed sentinel).
    /// For tables, `val` holds a table length.
    ///
    /// This might involve extending or truncating it depending on the memory/table's
    /// index type and the target's pointer type.
    fn convert_pointer_to_index_type(
        &self,
        mut pos: FuncCursor<'_>,
        val: ir::Value,
        index_type: IndexType,
        // When it is a memory and the memory is using single-byte pages,
        // we need to handle the truncation differently. See comments below.
        //
        // When it is a table, this should be set to false.
        single_byte_pages: bool,
    ) -> ir::Value {
        let desired_type = index_type_to_ir_type(index_type);
        let pointer_type = self.pointer_type();
        assert_eq!(pos.func.dfg.value_type(val), pointer_type);

        // The current length is of type `pointer_type` but we need to fit it
        // into `desired_type`. We are guaranteed that the result will always
        // fit, so we just need to do the right ireduce/sextend here.
        if pointer_type == desired_type {
            val
        } else if pointer_type.bits() > desired_type.bits() {
            pos.ins().ireduce(desired_type, val)
        } else {
            // We have a 64-bit memory/table on a 32-bit host -- a combination
            // that makes little sense from a user perspective, but that is
            // neither here nor there. We want to logically do an unsigned
            // extend *except* when we are given the `-1` sentinel, which we
            // must preserve as `-1` in the wider type.
            match single_byte_pages {
                false => {
                    // In the case that we have default page sizes, we can
                    // always sign extend, since valid memory lengths (in pages)
                    // never have their sign bit set, and so if the sign bit is
                    // set then this must be the `-1` sentinel, which we want to
                    // preserve through the extension.
                    //
                    // For tables, `single_byte_pages` is always false, so we
                    // likewise simply sign extend.
                    pos.ins().sextend(desired_type, val)
                }
                true => {
                    // For single-byte pages, we have to explicitly check for
                    // `-1` and choose whether to do an unsigned extension or
                    // return a larger `-1` because there are valid memory
                    // lengths (in pages) that have the sign bit set.
                    let extended = pos.ins().uextend(desired_type, val);
                    let neg_one = pos.ins().iconst(desired_type, -1);
                    let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1);
                    pos.ins().select(is_failure, neg_one, extended)
                }
            }
        }
    }
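
    // A worked example of the sentinel handling above (illustrative, assuming
    // a 32-bit host and a 64-bit memory): a failed `memory.grow` produces the
    // 32-bit value 0xFFFF_FFFF. With default (64 KiB) pages that bit pattern
    // can never be a valid page count, so `sextend` maps it straight to the
    // 64-bit `-1` (0xFFFF_FFFF_FFFF_FFFF). With single-byte pages, though, a
    // length such as 0x8000_0000 is legitimate and must become
    // 0x0000_0000_8000_0000, so blind sign extension would corrupt it; hence
    // the `uextend` plus an explicit `select` that widens only an exact `-1`
    // to the 64-bit sentinel.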

    fn get_or_init_func_ref_table_elem(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        index: ir::Value,
        cold_blocks: bool,
    ) -> ir::Value {
        let pointer_type = self.pointer_type();
        let table_data = self.get_or_create_table(builder.func, table_index);

        // To support lazy initialization of table contents, we check for a
        // null entry here, and if null, we take a slow path that invokes a
        // libcall.
        let (table_entry_addr, flags) = table_data.prepare_table_addr(self, builder, index);
        let value = builder.ins().load(pointer_type, flags, table_entry_addr, 0);

        if !self.tunables.table_lazy_init {
            return value;
        }

        // Mask off the "initialized bit". See documentation on
        // FUNCREF_INIT_BIT in crates/environ/src/ref_bits.rs for more
        // details. Note that `FUNCREF_MASK` has type `usize` which may not be
        // appropriate for the target architecture. Right now its value is
        // always -2 so assert that part doesn't change and then thread through
        // -2 as the immediate.
        assert_eq!(FUNCREF_MASK as isize, -2);
        let value_masked = builder.ins().band_imm(value, Imm64::from(-2));

        let null_block = builder.create_block();
        let continuation_block = builder.create_block();
        if cold_blocks {
            builder.set_cold_block(null_block);
            builder.set_cold_block(continuation_block);
        }
        let result_param = builder.append_block_param(continuation_block, pointer_type);
        builder.set_cold_block(null_block);

        builder.ins().brif(
            value,
            continuation_block,
            &[value_masked.into()],
            null_block,
            &[],
        );
        builder.seal_block(null_block);

        builder.switch_to_block(null_block);
        let index_type = self.table(table_index).idx_type;
        let table_index = builder.ins().iconst(I32, table_index.index() as i64);
        let lazy_init = self
            .builtin_functions
            .table_get_lazy_init_func_ref(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let index = self.cast_index_to_i64(&mut builder.cursor(), index, index_type);
        let call_inst = builder.ins().call(lazy_init, &[vmctx, table_index, index]);
        let returned_entry = builder.func.dfg.inst_results(call_inst)[0];
        builder
            .ins()
            .jump(continuation_block, &[returned_entry.into()]);
        builder.seal_block(continuation_block);

        builder.switch_to_block(continuation_block);
        result_param
    }
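
    // The lazy-init fast/slow path above corresponds roughly to this CLIF
    // shape (an illustrative sketch; names are invented):
    //
    //     v0 = load <table entry>
    //     v1 = band_imm v0, -2            ;; clear FUNCREF_INIT_BIT
    //     brif v0, continuation_block(v1), null_block
    //
    //   null_block:                        ;; cold
    //     v2 = call fn_table_get_lazy_init_func_ref(vmctx, table, index)
    //     jump continuation_block(v2)
    //
    //   continuation_block(v3):
    //     ;; v3 is the initialized, masked function pointer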

    #[cfg(feature = "wmemcheck")]
    fn check_malloc_start(&mut self, builder: &mut FunctionBuilder) {
        let malloc_start = self.builtin_functions.malloc_start(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(malloc_start, &[vmctx]);
    }

    #[cfg(feature = "wmemcheck")]
    fn check_free_start(&mut self, builder: &mut FunctionBuilder) {
        let free_start = self.builtin_functions.free_start(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        builder.ins().call(free_start, &[vmctx]);
    }

    #[cfg(feature = "wmemcheck")]
    fn current_func_name(&self, builder: &mut FunctionBuilder) -> Option<&str> {
        let func_index = match &builder.func.name {
            ir::UserFuncName::User(user) => FuncIndex::from_u32(user.index),
            _ => {
                panic!("function name not a UserFuncName::User as expected")
            }
        };
        self.translation
            .debuginfo
            .name_section
            .func_names
            .get(&func_index)
            .copied()
    }

    /// Proof-carrying code: create a memtype describing an empty
    /// runtime struct (to be updated later).
    fn create_empty_struct_memtype(&self, func: &mut ir::Function) -> ir::MemoryType {
        func.create_memory_type(ir::MemoryTypeData::Struct {
            size: 0,
            fields: vec![],
        })
    }

    /// Proof-carrying code: add a new field to a memtype used to
    /// describe a runtime struct. A memory region of type `memtype`
    /// will have a pointer at `offset` pointing to another memory
    /// region of type `pointee`. `readonly` indicates whether the
    /// PCC-checked code is expected to update this field or not.
    fn add_field_to_memtype(
        &self,
        func: &mut ir::Function,
        memtype: ir::MemoryType,
        offset: u32,
        pointee: ir::MemoryType,
        readonly: bool,
    ) {
        let ptr_size = self.pointer_type().bytes();
        match &mut func.memory_types[memtype] {
            ir::MemoryTypeData::Struct { size, fields } => {
                *size = std::cmp::max(*size, offset.checked_add(ptr_size).unwrap().into());
                fields.push(ir::MemoryTypeField {
                    ty: self.pointer_type(),
                    offset: offset.into(),
                    readonly,
                    fact: Some(ir::Fact::Mem {
                        ty: pointee,
                        min_offset: 0,
                        max_offset: 0,
                        nullable: false,
                    }),
                });

                // Sort fields by offset -- we need to do this now
                // because we may create an arbitrary number of
                // memtypes for imported memories and we don't
                // otherwise track them.
                fields.sort_by_key(|f| f.offset);
            }
            _ => panic!("Cannot add field to non-struct memtype"),
        }
    }

    /// Create an `ir::Global` that does `load(ptr + offset)` and, when PCC and
    /// memory types are enabled, adds a field to the pointer's memory type for
    /// this value we are loading.
    pub(crate) fn global_load_with_memory_type(
        &mut self,
        func: &mut ir::Function,
        ptr: ir::GlobalValue,
        offset: u32,
        flags: ir::MemFlags,
        ptr_mem_ty: Option<ir::MemoryType>,
    ) -> (ir::GlobalValue, Option<ir::MemoryType>) {
        let pointee = func.create_global_value(ir::GlobalValueData::Load {
            base: ptr,
            offset: Offset32::new(i32::try_from(offset).unwrap()),
            global_type: self.pointer_type(),
            flags,
        });

        let pointee_mem_ty = ptr_mem_ty.map(|ptr_mem_ty| {
            let pointee_mem_ty = self.create_empty_struct_memtype(func);
            self.add_field_to_memtype(func, ptr_mem_ty, offset, pointee_mem_ty, flags.readonly());
            func.global_value_facts[pointee] = Some(Fact::Mem {
                ty: pointee_mem_ty,
                min_offset: 0,
                max_offset: 0,
                nullable: false,
            });
            pointee_mem_ty
        });

        (pointee, pointee_mem_ty)
    }

    /// Like `global_load_with_memory_type` but specialized for loads out of the
    /// `vmctx`.
    pub(crate) fn global_load_from_vmctx_with_memory_type(
        &mut self,
        func: &mut ir::Function,
        offset: u32,
        flags: ir::MemFlags,
    ) -> (ir::GlobalValue, Option<ir::MemoryType>) {
        let vmctx = self.vmctx(func);
        self.global_load_with_memory_type(func, vmctx, offset, flags, self.pcc_vmctx_memtype)
    }

    /// Helper to emit a conditional trap based on `trap_cond`.
    ///
    /// This should only be used if `self.clif_instruction_traps_enabled()` is
    /// false; otherwise native CLIF instructions should be used instead.
    pub fn conditionally_trap(
        &mut self,
        builder: &mut FunctionBuilder,
        trap_cond: ir::Value,
        trap: ir::TrapCode,
    ) {
        assert!(!self.clif_instruction_traps_enabled());

        let trap_block = builder.create_block();
        builder.set_cold_block(trap_block);
        let continuation_block = builder.create_block();

        builder
            .ins()
            .brif(trap_cond, trap_block, &[], continuation_block, &[]);

        builder.seal_block(trap_block);
        builder.seal_block(continuation_block);

        builder.switch_to_block(trap_block);
        self.trap(builder, trap);
        builder.switch_to_block(continuation_block);
    }
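
    // Illustrative usage of `conditionally_trap` (cf. `guard_signed_divide`
    // below), guarding a signed division whose operands could overflow:
    //
    //     let is_overflow = builder.ins().band(rhs_is_minus_one, lhs_is_int_min);
    //     self.conditionally_trap(builder, is_overflow, ir::TrapCode::INTEGER_OVERFLOW);
    //
    // The trap lands in a cold block, so the fallthrough path stays
    // branch-free aside from the single `brif`.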

    /// Helper used when `self.clif_instruction_traps_enabled()` is false to
    /// test whether the divisor is zero.
    fn guard_zero_divisor(&mut self, builder: &mut FunctionBuilder, rhs: ir::Value) {
        if self.clif_instruction_traps_enabled() {
            return;
        }
        self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);
    }

    /// Helper used when `self.clif_instruction_traps_enabled()` is false to
    /// test whether a signed division operation will raise a trap.
    fn guard_signed_divide(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
    ) {
        if self.clif_instruction_traps_enabled() {
            return;
        }
        self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);

        let ty = builder.func.dfg.value_type(rhs);
        let minus_one = builder.ins().iconst(ty, -1);
        let rhs_is_minus_one = builder.ins().icmp(IntCC::Equal, rhs, minus_one);
        let int_min = builder.ins().iconst(
            ty,
            match ty {
                I32 => i64::from(i32::MIN),
                I64 => i64::MIN,
                _ => unreachable!(),
            },
        );
        let lhs_is_int_min = builder.ins().icmp(IntCC::Equal, lhs, int_min);
        let is_integer_overflow = builder.ins().band(rhs_is_minus_one, lhs_is_int_min);
        self.conditionally_trap(builder, is_integer_overflow, ir::TrapCode::INTEGER_OVERFLOW);
    }

    /// Helper used when `self.clif_instruction_traps_enabled()` is false to
    /// guard the traps from float-to-int conversions.
    fn guard_fcvt_to_int(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: ir::Type,
        val: ir::Value,
        signed: bool,
    ) {
        assert!(!self.clif_instruction_traps_enabled());
        let val_ty = builder.func.dfg.value_type(val);
        let val = if val_ty == F64 {
            val
        } else {
            builder.ins().fpromote(F64, val)
        };
        let isnan = builder.ins().fcmp(FloatCC::NotEqual, val, val);
        self.trapnz(builder, isnan, ir::TrapCode::BAD_CONVERSION_TO_INTEGER);
        let val = self.trunc_f64(builder, val);
        let (lower_bound, upper_bound) = f64_cvt_to_int_bounds(signed, ty.bits());
        let lower_bound = builder.ins().f64const(lower_bound);
        let too_small = builder
            .ins()
            .fcmp(FloatCC::LessThanOrEqual, val, lower_bound);
        self.trapnz(builder, too_small, ir::TrapCode::INTEGER_OVERFLOW);
        let upper_bound = builder.ins().f64const(upper_bound);
        let too_large = builder
            .ins()
            .fcmp(FloatCC::GreaterThanOrEqual, val, upper_bound);
        self.trapnz(builder, too_large, ir::TrapCode::INTEGER_OVERFLOW);
    }
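
    // A worked example of the bounds check above (assuming the bounds returned
    // by `f64_cvt_to_int_bounds` are exclusive, as the comparisons imply): for
    // a signed 32-bit conversion the bounds are expected to be roughly
    // (-2147483649.0, 2147483648.0), i.e. (i32::MIN - 1, i32::MAX + 1) as
    // f64s. The truncated value must lie strictly between them, so an input of
    // 2147483648.5 truncates to 2147483648.0 and traps with INTEGER_OVERFLOW,
    // while NaN inputs are caught earlier by the self-inequality check.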

    /// Get the `ir::Type` for a `VMSharedTypeIndex`.
    pub(crate) fn vmshared_type_index_ty(&self) -> Type {
        Type::int_with_byte_size(self.offsets.size_of_vmshared_type_index().into()).unwrap()
    }

    /// Given a `ModuleInternedTypeIndex`, emit code to get the corresponding
    /// `VMSharedTypeIndex` at runtime.
    pub(crate) fn module_interned_to_shared_ty(
        &mut self,
        pos: &mut FuncCursor,
        interned_ty: ModuleInternedTypeIndex,
    ) -> ir::Value {
        let vmctx = self.vmctx_val(pos);
        let pointer_type = self.pointer_type();
        let mem_flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        // Load the base pointer of the array of `VMSharedTypeIndex`es.
        let shared_indices = pos.ins().load(
            pointer_type,
            mem_flags,
            vmctx,
            i32::from(self.offsets.ptr.vmctx_type_ids_array()),
        );

        // Calculate the offset in that array for this type's entry.
        let ty = self.vmshared_type_index_ty();
        let offset = i32::try_from(interned_ty.as_u32().checked_mul(ty.bytes()).unwrap()).unwrap();

        // Load the `VMSharedTypeIndex` that this `ModuleInternedTypeIndex` is
        // associated with at runtime from the array.
        pos.ins().load(ty, mem_flags, shared_indices, offset)
    }
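
    // For example (illustrative, assuming a 4-byte `VMSharedTypeIndex`):
    // interned index 7 is loaded from `type_ids_array_base + 7 * 4`, so the
    // emitted code is two dependent loads:
    //
    //     v0 = load.i64 vmctx+<type_ids_array offset>  ;; array base pointer
    //     v1 = load.i32 v0+28                          ;; entry for index 7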

    /// Load the associated `VMSharedTypeIndex` from inside a `*const VMFuncRef`.
    ///
    /// Does not check for null; just assumes that the `funcref` is a valid
    /// pointer.
    pub(crate) fn load_funcref_type_index(
        &mut self,
        pos: &mut FuncCursor,
        mem_flags: ir::MemFlags,
        funcref: ir::Value,
    ) -> ir::Value {
        let ty = self.vmshared_type_index_ty();
        pos.ins().load(
            ty,
            mem_flags,
            funcref,
            i32::from(self.offsets.ptr.vm_func_ref_type_index()),
        )
    }

    /// Does this function need a GC heap?
    pub fn needs_gc_heap(&self) -> bool {
        self.needs_gc_heap
    }

    /// Get the number of Wasm parameters for the given function.
    pub(crate) fn num_params_for_func(&self, function_index: FuncIndex) -> usize {
        let ty = self.module.functions[function_index]
            .signature
            .unwrap_module_type_index();
        self.types[ty].unwrap_func().params().len()
    }

    /// Get the number of Wasm parameters for the given function type.
    ///
    /// Panics on non-function types.
    pub(crate) fn num_params_for_function_type(&self, type_index: TypeIndex) -> usize {
        let ty = self.module.types[type_index].unwrap_module_type_index();
        self.types[ty].unwrap_func().params().len()
    }
}

#[derive(Default)]
pub(crate) struct WasmEntities {
    /// Map from a Wasm global index from this module to its implementation in
    /// the Cranelift function we are building.
    pub(crate) globals: SecondaryMap<GlobalIndex, Option<GlobalVariable>>,

    /// Map from a Wasm memory index to its `Heap` implementation in the
    /// Cranelift function we are building.
    pub(crate) memories: SecondaryMap<MemoryIndex, PackedOption<Heap>>,

    /// Map from an (interned) Wasm type index from this module to its
    /// `ir::SigRef` in the Cranelift function we are building.
    pub(crate) sig_refs: SecondaryMap<ModuleInternedTypeIndex, PackedOption<ir::SigRef>>,

    /// Map from a defined Wasm function index to its associated function
    /// reference in the Cranelift function we are building.
    pub(crate) defined_func_refs: SecondaryMap<DefinedFuncIndex, PackedOption<ir::FuncRef>>,

    /// Map from an imported Wasm function index for which we statically know
    /// which function will always be used to satisfy that import to its
    /// associated function reference in the Cranelift function we are building.
    pub(crate) imported_func_refs: SecondaryMap<FuncIndex, PackedOption<ir::FuncRef>>,

    /// Map from a Wasm table index to its associated implementation in the
    /// Cranelift function we are building.
    pub(crate) tables: SecondaryMap<TableIndex, Option<TableData>>,
}

macro_rules! define_get_or_create_methods {
    ( $( $name:ident ( $map:ident ) : $create:ident : $key:ty => $val:ty ; )* ) => {
        $(
            pub(crate) fn $name(&mut self, func: &mut ir::Function, key: $key) -> $val {
                match self.entities.$map[key].clone().into() {
                    Some(val) => val,
                    None => {
                        let val = self.$create(func, key);
                        self.entities.$map[key] = Some(val.clone()).into();
                        val
                    }
                }
            }
        )*
    };
}
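
// As an illustrative sketch (not part of the source), the
// `get_or_create_heap(memories) : make_heap : MemoryIndex => Heap` line in the
// invocation below expands to roughly:
//
//     pub(crate) fn get_or_create_heap(&mut self, func: &mut ir::Function, key: MemoryIndex) -> Heap {
//         match self.entities.memories[key].clone().into() {
//             Some(val) => val,
//             None => {
//                 let val = self.make_heap(func, key);
//                 self.entities.memories[key] = Some(val.clone()).into();
//                 val
//             }
//         }
//     }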

impl FuncEnvironment<'_> {
    define_get_or_create_methods! {
        get_or_create_global(globals) : make_global : GlobalIndex => GlobalVariable;
        get_or_create_heap(memories) : make_heap : MemoryIndex => Heap;
        get_or_create_interned_sig_ref(sig_refs) : make_sig_ref : ModuleInternedTypeIndex => ir::SigRef;
        get_or_create_defined_func_ref(defined_func_refs) : make_defined_func_ref : DefinedFuncIndex => ir::FuncRef;
        get_or_create_imported_func_ref(imported_func_refs) : make_imported_func_ref : FuncIndex => ir::FuncRef;
        get_or_create_table(tables) : make_table : TableIndex => TableData;
    }

    fn make_global(&mut self, func: &mut ir::Function, index: GlobalIndex) -> GlobalVariable {
        let ty = self.module.globals[index].wasm_ty;

        if ty.is_vmgcref_type() {
            // Although reference-typed globals live at the same memory location as
            // any other type of global at the same index would, getting or
            // setting them requires ref counting barriers. Therefore, we need
            // to use `GlobalVariable::Custom`, as that is the only kind of
            // `GlobalVariable` whose accesses support custom translation.
            return GlobalVariable::Custom;
        }

        let (gv, offset) = self.get_global_location(func, index);
        GlobalVariable::Memory {
            gv,
            offset: offset.into(),
            ty: super::value_type(self.isa, ty),
        }
    }
1259
    pub(crate) fn get_or_create_sig_ref(
        &mut self,
        func: &mut ir::Function,
        ty: TypeIndex,
    ) -> ir::SigRef {
        let ty = self.module.types[ty].unwrap_module_type_index();
        self.get_or_create_interned_sig_ref(func, ty)
    }

    fn make_sig_ref(
        &mut self,
        func: &mut ir::Function,
        index: ModuleInternedTypeIndex,
    ) -> ir::SigRef {
        let wasm_func_ty = self.types[index].unwrap_func();
        let sig = crate::wasm_call_signature(self.isa, wasm_func_ty, &self.tunables);
        let sig_ref = func.import_signature(sig);
        self.sig_ref_to_ty[sig_ref] = Some(wasm_func_ty);
        sig_ref
    }

    fn make_defined_func_ref(
        &mut self,
        func: &mut ir::Function,
        def_func_index: DefinedFuncIndex,
    ) -> ir::FuncRef {
        let func_index = self.module.func_index(def_func_index);

        let ty = self.module.functions[func_index]
            .signature
            .unwrap_module_type_index();
        let signature = self.get_or_create_interned_sig_ref(func, ty);

        let key = FuncKey::DefinedWasmFunction(self.translation.module_index(), def_func_index);
        let (namespace, index) = key.into_raw_parts();
        let name = ir::ExternalName::User(
            func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
        );

        func.import_function(ir::ExtFuncData {
            name,
            signature,
            colocated: true,
        })
    }

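    // Note (added): `make_defined_func_ref` above and
    // `make_imported_func_ref` below share the same recipe: encode a
    // `FuncKey` into the `(namespace, index)` halves of a
    // `UserExternalName`, declare that name on the function, and import a
    // colocated external function with the Wasm-call signature:
    //
    //     let (namespace, index) = key.into_raw_parts();
    //     let name = ir::ExternalName::User(
    //         func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
    //     );
    //     func.import_function(ir::ExtFuncData { name, signature, colocated: true })
    //
    // so the key can be recovered later when relocations are resolved.
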
    fn make_imported_func_ref(
        &mut self,
        func: &mut ir::Function,
        func_index: FuncIndex,
    ) -> ir::FuncRef {
        assert!(self.module.is_imported_function(func_index));
        assert!(self.translation.known_imported_functions[func_index].is_some());

        let ty = self.module.functions[func_index]
            .signature
            .unwrap_module_type_index();
        let signature = self.get_or_create_interned_sig_ref(func, ty);

        let (module, def_func_index) =
            self.translation.known_imported_functions[func_index].unwrap();
        let key = FuncKey::DefinedWasmFunction(module, def_func_index);
        let (namespace, index) = key.into_raw_parts();
        let name = ir::ExternalName::User(
            func.declare_imported_user_function(ir::UserExternalName { namespace, index }),
        );

        func.import_function(ir::ExtFuncData {
            name,
            signature,
            colocated: true,
        })
    }

    fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> Heap {
        let pointer_type = self.pointer_type();
        let memory = self.module.memories[index];
        let is_shared = memory.shared;

        let (base_ptr, base_offset, current_length_offset, ptr_memtype) = {
            let vmctx = self.vmctx(func);
            if let Some(def_index) = self.module.defined_memory_index(index) {
                if is_shared {
                    // As with imported memory, the `VMMemoryDefinition` for a
                    // shared memory is stored elsewhere. We store a `*mut
                    // VMMemoryDefinition` pointing to it and dereference that
                    // pointer when atomically growing the memory.
                    let from_offset = self.offsets.vmctx_vmmemory_pointer(def_index);
                    let (memory, def_mt) = self.global_load_from_vmctx_with_memory_type(
                        func,
                        from_offset,
                        ir::MemFlags::trusted().with_readonly().with_can_move(),
                    );
                    let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
                    let current_length_offset =
                        i32::from(self.offsets.ptr.vmmemory_definition_current_length());
                    (memory, base_offset, current_length_offset, def_mt)
                } else {
                    let owned_index = self.module.owned_memory_index(def_index);
                    let owned_base_offset =
                        self.offsets.vmctx_vmmemory_definition_base(owned_index);
                    let owned_length_offset = self
                        .offsets
                        .vmctx_vmmemory_definition_current_length(owned_index);
                    let current_base_offset = i32::try_from(owned_base_offset).unwrap();
                    let current_length_offset = i32::try_from(owned_length_offset).unwrap();
                    (
                        vmctx,
                        current_base_offset,
                        current_length_offset,
                        self.pcc_vmctx_memtype,
                    )
                }
            } else {
                let from_offset = self.offsets.vmctx_vmmemory_import_from(index);
                let (memory, def_mt) = self.global_load_from_vmctx_with_memory_type(
                    func,
                    from_offset,
                    ir::MemFlags::trusted().with_readonly().with_can_move(),
                );
                let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
                let current_length_offset =
                    i32::from(self.offsets.ptr.vmmemory_definition_current_length());
                (memory, base_offset, current_length_offset, def_mt)
            }
        };

        let bound = func.create_global_value(ir::GlobalValueData::Load {
            base: base_ptr,
            offset: Offset32::new(current_length_offset),
            global_type: pointer_type,
            flags: MemFlags::trusted(),
        });

        let (base_fact, pcc_memory_type) = self.make_pcc_base_fact_and_type_for_memory(
            func,
            memory,
            base_offset,
            current_length_offset,
            ptr_memtype,
            bound,
        );

        let base = self.make_heap_base(func, memory, base_ptr, base_offset, base_fact);

        self.heaps.push(HeapData {
            base,
            bound,
            pcc_memory_type,
            memory,
        })
    }

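    // Added summary (commentary only): `make_heap` selects among three
    // layouts for where the `VMMemoryDefinition` lives, then builds the
    // same pair of global values on top of whichever base was chosen:
    //
    //     // owned, non-shared: definition is inline in the vmctx
    //     //   vmctx + owned_base_offset   -> base
    //     //   vmctx + owned_length_offset -> current_length
    //     //
    //     // shared or imported: vmctx holds a *mut VMMemoryDefinition
    //     //   load(vmctx + from_offset)                      -> def_ptr
    //     //   def_ptr + vmmemory_definition_base()           -> base
    //     //   def_ptr + vmmemory_definition_current_length() -> current_length
    //
    // In every case the heap `bound` global value is a trusted load of
    // `current_length` from the selected base pointer.
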
    pub(crate) fn make_heap_base(
        &self,
        func: &mut Function,
        memory: Memory,
        ptr: ir::GlobalValue,
        offset: i32,
        fact: Option<Fact>,
    ) -> ir::GlobalValue {
        let pointer_type = self.pointer_type();

        let mut flags = ir::MemFlags::trusted().with_checked().with_can_move();
        if !memory.memory_may_move(self.tunables) {
            flags.set_readonly();
        }

        let heap_base = func.create_global_value(ir::GlobalValueData::Load {
            base: ptr,
            offset: Offset32::new(offset),
            global_type: pointer_type,
            flags,
        });
        func.global_value_facts[heap_base] = fact;
        heap_base
    }

    pub(crate) fn make_pcc_base_fact_and_type_for_memory(
        &mut self,
        func: &mut Function,
        memory: Memory,
        base_offset: i32,
        current_length_offset: i32,
        ptr_memtype: Option<ir::MemoryType>,
        heap_bound: ir::GlobalValue,
    ) -> (Option<Fact>, Option<ir::MemoryType>) {
        // If we have a declared maximum, we can make this a "static" heap, which is
        // allocated up front and never moved.
        let host_page_size_log2 = self.target_config().page_size_align_log2;
        let (base_fact, memory_type) = if !memory
            .can_elide_bounds_check(self.tunables, host_page_size_log2)
        {
            if let Some(ptr_memtype) = ptr_memtype {
                // Create a memtype representing the untyped memory region.
                let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory {
                    gv: heap_bound,
                    size: self.tunables.memory_guard_size,
                });
                // This fact applies to any pointer to the start of the memory.
                let base_fact = ir::Fact::dynamic_base_ptr(data_mt);
                // This fact applies to the length.
                let length_fact = ir::Fact::global_value(
                    u16::try_from(self.isa.pointer_type().bits()).unwrap(),
                    heap_bound,
                );
                // Create a field in the vmctx for the base pointer.
                match &mut func.memory_types[ptr_memtype] {
                    ir::MemoryTypeData::Struct { size, fields } => {
                        let base_offset = u64::try_from(base_offset).unwrap();
                        fields.push(ir::MemoryTypeField {
                            offset: base_offset,
                            ty: self.isa.pointer_type(),
                            // Read-only field from the PoV of PCC checks:
                            // don't allow stores to this field. (Even if
                            // it is a dynamic memory whose base can
                            // change, that update happens inside the
                            // runtime, not in generated code.)
                            readonly: true,
                            fact: Some(base_fact.clone()),
                        });
                        let current_length_offset = u64::try_from(current_length_offset).unwrap();
                        fields.push(ir::MemoryTypeField {
                            offset: current_length_offset,
                            ty: self.isa.pointer_type(),
                            // As above, read-only; only the runtime modifies it.
                            readonly: true,
                            fact: Some(length_fact),
                        });

                        let pointer_size = u64::from(self.isa.pointer_type().bytes());
                        let fields_end = std::cmp::max(
                            base_offset + pointer_size,
                            current_length_offset + pointer_size,
                        );
                        *size = std::cmp::max(*size, fields_end);
                    }
                    _ => {
                        panic!("Bad memtype");
                    }
                }
                // Apply a fact to the base pointer.
                (Some(base_fact), Some(data_mt))
            } else {
                (None, None)
            }
        } else {
            if let Some(ptr_memtype) = ptr_memtype {
                // Create a memtype representing the untyped memory region.
                let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory {
                    size: self
                        .tunables
                        .memory_reservation
                        .checked_add(self.tunables.memory_guard_size)
                        .expect("Memory plan has overflowing size plus guard"),
                });
                // This fact applies to any pointer to the start of the memory.
                let base_fact = Fact::Mem {
                    ty: data_mt,
                    min_offset: 0,
                    max_offset: 0,
                    nullable: false,
                };
                // Create a field in the vmctx for the base pointer.
                match &mut func.memory_types[ptr_memtype] {
                    ir::MemoryTypeData::Struct { size, fields } => {
                        let offset = u64::try_from(base_offset).unwrap();
                        fields.push(ir::MemoryTypeField {
                            offset,
                            ty: self.isa.pointer_type(),
                            // Read-only field from the PoV of PCC checks:
                            // don't allow stores to this field. (Even if
                            // it is a dynamic memory whose base can
                            // change, that update happens inside the
                            // runtime, not in generated code.)
                            readonly: true,
                            fact: Some(base_fact.clone()),
                        });
                        *size = std::cmp::max(
                            *size,
                            offset + u64::from(self.isa.pointer_type().bytes()),
                        );
                    }
                    _ => {
                        panic!("Bad memtype");
                    }
                }
                // Apply a fact to the base pointer.
                (Some(base_fact), Some(data_mt))
            } else {
                (None, None)
            }
        };
        (base_fact, memory_type)
    }

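    // Added note: the two arms above produce the two PCC fact shapes used
    // for heaps. A dynamically-bounded memory gets a `DynamicMemory`
    // memtype with `Fact::dynamic_base_ptr(data_mt)` on the base and
    // `Fact::global_value(ptr_bits, heap_bound)` on the length, while a
    // statically-bounded memory pins the base to offset 0 of a fixed-size
    // region:
    //
    //     Fact::Mem { ty: data_mt, min_offset: 0, max_offset: 0, nullable: false }
    //
    // Either way, the base fact is then attached to the heap-base global
    // value by `make_heap_base`.
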
    fn make_table(&mut self, func: &mut ir::Function, index: TableIndex) -> TableData {
        let pointer_type = self.pointer_type();

        let (ptr, base_offset, current_elements_offset) = {
            let vmctx = self.vmctx(func);
            if let Some(def_index) = self.module.defined_table_index(index) {
                let base_offset =
                    i32::try_from(self.offsets.vmctx_vmtable_definition_base(def_index)).unwrap();
                let current_elements_offset = i32::try_from(
                    self.offsets
                        .vmctx_vmtable_definition_current_elements(def_index),
                )
                .unwrap();
                (vmctx, base_offset, current_elements_offset)
            } else {
                let from_offset = self.offsets.vmctx_vmtable_from(index);
                let table = func.create_global_value(ir::GlobalValueData::Load {
                    base: vmctx,
                    offset: Offset32::new(i32::try_from(from_offset).unwrap()),
                    global_type: pointer_type,
                    flags: MemFlags::trusted().with_readonly().with_can_move(),
                });
                let base_offset = i32::from(self.offsets.vmtable_definition_base());
                let current_elements_offset =
                    i32::from(self.offsets.vmtable_definition_current_elements());
                (table, base_offset, current_elements_offset)
            }
        };

        let table = &self.module.tables[index];
        let element_size = if table.ref_type.is_vmgcref_type() {
            // For GC-managed references, tables store `Option<VMGcRef>`s.
            ir::types::I32.bytes()
        } else {
            self.reference_type(table.ref_type.heap_type).0.bytes()
        };

        let base_gv = func.create_global_value(ir::GlobalValueData::Load {
            base: ptr,
            offset: Offset32::new(base_offset),
            global_type: pointer_type,
            flags: if Some(table.limits.min) == table.limits.max {
                // A fixed-size table can't be resized so its base address won't
                // change.
                MemFlags::trusted().with_readonly().with_can_move()
            } else {
                MemFlags::trusted()
            },
        });

        let bound = if Some(table.limits.min) == table.limits.max {
            TableSize::Static {
                bound: table.limits.min,
            }
        } else {
            TableSize::Dynamic {
                bound_gv: func.create_global_value(ir::GlobalValueData::Load {
                    base: ptr,
                    offset: Offset32::new(current_elements_offset),
                    global_type: ir::Type::int(
                        u16::from(self.offsets.size_of_vmtable_definition_current_elements()) * 8,
                    )
                    .unwrap(),
                    flags: MemFlags::trusted(),
                }),
            }
        };

        TableData {
            base_gv,
            bound,
            element_size,
        }
    }

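    // Added sketch: the returned `TableData` is the raw material for
    // bounds-checked table accesses elsewhere in this file, e.g.
    // (hypothetical `table_data`, `builder`, and `index` bindings):
    //
    //     // Computes roughly `base_gv + index * element_size` after
    //     // checking `index` against `bound`, which is a compile-time
    //     // constant for `TableSize::Static` and a loaded global value
    //     // for `TableSize::Dynamic`.
    //     let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
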
    /// Get the type index associated with an exception object.
    #[cfg(feature = "gc")]
    pub(crate) fn exception_type_from_tag(&self, tag: TagIndex) -> EngineOrModuleTypeIndex {
        self.module.tags[tag].exception
    }

    /// Get the parameter arity of the associated function type for the given tag.
    pub(crate) fn tag_param_arity(&self, tag: TagIndex) -> usize {
        let func_ty = self.module.tags[tag].signature.unwrap_module_type_index();
        let func_ty = self
            .types
            .unwrap_func(func_ty)
            .expect("already validated to refer to a function type");
        func_ty.params().len()
    }

    /// Get the runtime instance ID and defined-tag ID in that
    /// instance for a particular static tag ID.
    #[cfg(feature = "gc")]
    pub(crate) fn get_instance_and_tag(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        tag_index: TagIndex,
    ) -> (ir::Value, ir::Value) {
        if let Some(defined_tag_index) = self.module.defined_tag_index(tag_index) {
            // Our own tag -- we only need to get our instance ID.
            let builtin = self.builtin_functions.get_instance_id(builder.func);
            let vmctx = self.vmctx_val(&mut builder.cursor());
            let call = builder.ins().call(builtin, &[vmctx]);
            let instance_id = builder.func.dfg.inst_results(call)[0];
            let tag_id = builder
                .ins()
                .iconst(I32, i64::from(defined_tag_index.as_u32()));
            (instance_id, tag_id)
        } else {
            // An imported tag -- we need to load the VMTagImport struct.
            let vmctx_tag_vmctx_offset = self.offsets.vmctx_vmtag_import_vmctx(tag_index);
            let vmctx_tag_index_offset = self.offsets.vmctx_vmtag_import_index(tag_index);
            let vmctx = self.vmctx_val(&mut builder.cursor());
            let pointer_type = self.pointer_type();
            let from_vmctx = builder.ins().load(
                pointer_type,
                MemFlags::trusted().with_readonly(),
                vmctx,
                i32::try_from(vmctx_tag_vmctx_offset).unwrap(),
            );
            let index = builder.ins().load(
                I32,
                MemFlags::trusted().with_readonly(),
                vmctx,
                i32::try_from(vmctx_tag_index_offset).unwrap(),
            );
            let builtin = self.builtin_functions.get_instance_id(builder.func);
            let call = builder.ins().call(builtin, &[from_vmctx]);
            let from_instance_id = builder.func.dfg.inst_results(call)[0];
            (from_instance_id, index)
        }
    }
}

struct Call<'a, 'func, 'module_env> {
    builder: &'a mut FunctionBuilder<'func>,
    env: &'a mut FuncEnvironment<'module_env>,
    handlers: Vec<(Option<ExceptionTag>, Block)>,
    tail: bool,
}

enum CheckIndirectCallTypeSignature {
    Runtime,
    StaticMatch {
        /// Whether the funcref may be null, or whether it is statically
        /// known to be non-null.
        may_be_null: bool,
    },
    StaticTrap,
}

type CallRets = SmallVec<[ir::Value; 4]>;

impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> {
    /// Create a new `Call` site that will do regular, non-tail calls.
    pub fn new(
        builder: &'a mut FunctionBuilder<'func>,
        env: &'a mut FuncEnvironment<'module_env>,
        handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
    ) -> Self {
        let handlers = handlers.into_iter().collect();
        Call {
            builder,
            env,
            handlers,
            tail: false,
        }
    }

    /// Create a new `Call` site that will perform tail calls.
    pub fn new_tail(
        builder: &'a mut FunctionBuilder<'func>,
        env: &'a mut FuncEnvironment<'module_env>,
    ) -> Self {
        Call {
            builder,
            env,
            handlers: vec![],
            tail: true,
        }
    }

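    // Example (added sketch; `builder`, `env`, and the argument values are
    // hypothetical bindings at a translation site): a plain non-tail direct
    // call with no exception handlers looks like
    //
    //     let results = Call::new(builder, env, std::iter::empty())
    //         .direct_call(callee_index, sig_ref, &call_args)?;
    //
    // while `Call::new_tail(builder, env)` emits `return_call`s instead and
    // therefore produces no results in the current block.
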
    /// Do a Wasm-level direct call to the given callee function.
    pub fn direct_call(
        mut self,
        callee_index: FuncIndex,
        sig_ref: ir::SigRef,
        call_args: &[ir::Value],
    ) -> WasmResult<CallRets> {
        let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
        let caller_vmctx = self
            .builder
            .func
            .special_param(ArgumentPurpose::VMContext)
            .unwrap();

        // Handle direct calls to locally-defined functions.
        if let Some(def_func_index) = self.env.module.defined_func_index(callee_index) {
            // First append the callee vmctx address, which is the same as the caller vmctx in
            // this case.
            real_call_args.push(caller_vmctx);

            // Then append the caller vmctx address.
            real_call_args.push(caller_vmctx);

            // Then append the regular call arguments.
            real_call_args.extend_from_slice(call_args);

            // Finally, make the direct call!
            let callee = self
                .env
                .get_or_create_defined_func_ref(self.builder.func, def_func_index);
            return Ok(self.direct_call_inst(callee, &real_call_args));
        }

        // Handle direct calls to imported functions. We use an indirect call
        // so that we don't have to patch the code at runtime.
        let pointer_type = self.env.pointer_type();
        let vmctx = self.env.vmctx(self.builder.func);
        let base = self.builder.ins().global_value(pointer_type, vmctx);

        let mem_flags = ir::MemFlags::trusted().with_readonly().with_can_move();

        // Load the callee address.
        let body_offset = i32::try_from(
            self.env
                .offsets
                .vmctx_vmfunction_import_wasm_call(callee_index),
        )
        .unwrap();

        // First append the callee vmctx address.
        let vmctx_offset =
            i32::try_from(self.env.offsets.vmctx_vmfunction_import_vmctx(callee_index)).unwrap();
        let callee_vmctx = self
            .builder
            .ins()
            .load(pointer_type, mem_flags, base, vmctx_offset);
        real_call_args.push(callee_vmctx);
        real_call_args.push(caller_vmctx);

        // Then append the regular call arguments.
        real_call_args.extend_from_slice(call_args);

        // If we statically know the imported function (e.g. this is a
        // component-to-component call where we statically know both components)
        // then we can actually still make a direct call (although we do have to
        // pass the callee's vmctx that we just loaded, not our own). Otherwise,
        // we really do an indirect call.
        if self.env.translation.known_imported_functions[callee_index].is_some() {
            let callee = self
                .env
                .get_or_create_imported_func_ref(self.builder.func, callee_index);
            Ok(self.direct_call_inst(callee, &real_call_args))
        } else {
            let func_addr = self
                .builder
                .ins()
                .load(pointer_type, mem_flags, base, body_offset);
            Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
        }
    }

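    // Added note: the argument layout assembled above is the Wasm calling
    // convention used throughout this file:
    //
    //     real_call_args = [callee_vmctx, caller_vmctx, wasm_args...]
    //
    // For locally-defined callees the two vmctx values are identical; for
    // imports the callee vmctx is first loaded out of the caller's
    // `VMFunctionImport` slot.
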
    /// Do a Wasm-level indirect call through the given funcref table.
    pub fn indirect_call(
        mut self,
        features: &WasmFeatures,
        table_index: TableIndex,
        ty_index: TypeIndex,
        sig_ref: ir::SigRef,
        callee: ir::Value,
        call_args: &[ir::Value],
    ) -> WasmResult<Option<CallRets>> {
        let (code_ptr, callee_vmctx) = match self.check_and_load_code_and_callee_vmctx(
            features,
            table_index,
            ty_index,
            callee,
            false,
        )? {
            Some(pair) => pair,
            None => return Ok(None),
        };

        self.unchecked_call_impl(sig_ref, code_ptr, callee_vmctx, call_args)
            .map(Some)
    }

    fn check_and_load_code_and_callee_vmctx(
        &mut self,
        features: &WasmFeatures,
        table_index: TableIndex,
        ty_index: TypeIndex,
        callee: ir::Value,
        cold_blocks: bool,
    ) -> WasmResult<Option<(ir::Value, ir::Value)>> {
        // Get the funcref pointer from the table.
        let funcref_ptr = self.env.get_or_init_func_ref_table_elem(
            self.builder,
            table_index,
            callee,
            cold_blocks,
        );

        // If necessary, check the signature.
        let check =
            self.check_indirect_call_type_signature(features, table_index, ty_index, funcref_ptr);

        let trap_code = match check {
            // The type of `funcref_ptr` is checked at runtime, which also
            // guarantees that it is non-null if code gets this far. That
            // means nothing in `unchecked_call` can fail.
            CheckIndirectCallTypeSignature::Runtime => None,

            // No type check was performed on `funcref_ptr` because it's
            // statically known to have the right type. Note that whether or
            // not the function is null is not necessarily tested so far since
            // no type information was inspected.
            //
            // If the table may hold null functions, then further loads in
            // `unchecked_call` may fail. If the table only holds non-null
            // functions, though, then there's no possibility of a trap.
            CheckIndirectCallTypeSignature::StaticMatch { may_be_null } => {
                if may_be_null {
                    Some(crate::TRAP_INDIRECT_CALL_TO_NULL)
                } else {
                    None
                }
            }

            // Code has already trapped, so return nothing, indicating that
            // this is now unreachable code.
            CheckIndirectCallTypeSignature::StaticTrap => return Ok(None),
        };

        Ok(Some(self.load_code_and_vmctx(funcref_ptr, trap_code)))
    }

    fn check_indirect_call_type_signature(
        &mut self,
        features: &WasmFeatures,
        table_index: TableIndex,
        ty_index: TypeIndex,
        funcref_ptr: ir::Value,
    ) -> CheckIndirectCallTypeSignature {
        let table = &self.env.module.tables[table_index];
        let sig_id_size = self.env.offsets.size_of_vmshared_type_index();
        let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap();

        // Test if a type check is necessary for this table. If this table is a
        // table of typed functions and that type matches `ty_index`, then
        // there's no need to perform a typecheck.
        match table.ref_type.heap_type {
            // Functions do not have a statically known type in the table, so a
            // typecheck is required. Fall through to below to perform the
            // actual typecheck.
            WasmHeapType::Func => {}

            // Functions that have a statically known type are either going to
            // always succeed or always fail. Figure out which by inspecting
            // the types further.
            WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Module(table_ty)) => {
                // If `ty_index` matches `table_ty`, then this call is
                // statically known to have the right type, so no checks are
                // necessary.
                let specified_ty = self.env.module.types[ty_index].unwrap_module_type_index();
                if specified_ty == table_ty {
                    return CheckIndirectCallTypeSignature::StaticMatch {
                        may_be_null: table.ref_type.nullable,
                    };
                }

                if features.gc() {
                    // If we are in the Wasm GC world, then we need to perform
                    // an actual subtype check at runtime. Fall through to below
                    // to do that.
                } else {
                    // Otherwise, if the types don't match, then either (a) this
                    // is a null pointer or (b) it's a pointer with the wrong
                    // type. Figure out which and trap here.
                    //
                    // If it's possible to have a null here then try to load the
                    // type information. If that fails due to the function being
                    // a null pointer, then this was a call to null. Otherwise
                    // if it succeeds then we know it won't match, so trap
                    // anyway.
                    if table.ref_type.nullable {
                        if self.env.clif_memory_traps_enabled() {
                            self.builder.ins().load(
                                sig_id_type,
                                ir::MemFlags::trusted()
                                    .with_readonly()
                                    .with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL)),
                                funcref_ptr,
                                i32::from(self.env.offsets.ptr.vm_func_ref_type_index()),
                            );
                        } else {
                            self.env.trapz(
                                self.builder,
                                funcref_ptr,
                                crate::TRAP_INDIRECT_CALL_TO_NULL,
                            );
                        }
                    }
                    self.env.trap(self.builder, crate::TRAP_BAD_SIGNATURE);
                    return CheckIndirectCallTypeSignature::StaticTrap;
                }
            }

            // Tables of `nofunc` can only be inhabited by null, so go ahead and
            // trap with that.
            WasmHeapType::NoFunc => {
                assert!(table.ref_type.nullable);
                self.env
                    .trap(self.builder, crate::TRAP_INDIRECT_CALL_TO_NULL);
                return CheckIndirectCallTypeSignature::StaticTrap;
            }

            // Engine-indexed types don't show up until runtime, and it's a
            // Wasm validation error to perform a call through a non-function
            // table, so none of these cases is reachable here.
            WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Engine(_))
            | WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::RecGroup(_))
            | WasmHeapType::Extern
            | WasmHeapType::NoExtern
            | WasmHeapType::Any
            | WasmHeapType::Eq
            | WasmHeapType::I31
            | WasmHeapType::Array
            | WasmHeapType::ConcreteArray(_)
            | WasmHeapType::Struct
            | WasmHeapType::ConcreteStruct(_)
            | WasmHeapType::Exn
            | WasmHeapType::ConcreteExn(_)
            | WasmHeapType::NoExn
            | WasmHeapType::Cont
            | WasmHeapType::ConcreteCont(_)
            | WasmHeapType::NoCont
            | WasmHeapType::None => {
                unreachable!()
            }
        }

        // Load the caller's `VMSharedTypeIndex`.
        let interned_ty = self.env.module.types[ty_index].unwrap_module_type_index();
        let caller_sig_id = self
            .env
            .module_interned_to_shared_ty(&mut self.builder.cursor(), interned_ty);

        // Load the callee's `VMSharedTypeIndex`.
        //
        // Note that the callee may be null, in which case this load may
        // trap. If so, use the `TRAP_INDIRECT_CALL_TO_NULL` trap code.
        let mut mem_flags = ir::MemFlags::trusted().with_readonly();
        if self.env.clif_memory_traps_enabled() {
            mem_flags = mem_flags.with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL));
        } else {
            self.env
                .trapz(self.builder, funcref_ptr, crate::TRAP_INDIRECT_CALL_TO_NULL);
        }
        let callee_sig_id =
            self.env
                .load_funcref_type_index(&mut self.builder.cursor(), mem_flags, funcref_ptr);

        // Check that they match: in the case of Wasm GC, this means doing a
        // full subtype check. Otherwise, we do a simple equality check.
        let matches = if features.gc() {
            #[cfg(feature = "gc")]
            {
                self.env
                    .is_subtype(self.builder, callee_sig_id, caller_sig_id)
            }
            #[cfg(not(feature = "gc"))]
            {
                unreachable!()
            }
        } else {
            self.builder
                .ins()
                .icmp(IntCC::Equal, callee_sig_id, caller_sig_id)
        };
        self.env
            .trapz(self.builder, matches, crate::TRAP_BAD_SIGNATURE);
        CheckIndirectCallTypeSignature::Runtime
    }

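    // Added sketch: in the fully-dynamic case, the check above boils down
    // to the following (CLIF-flavored pseudocode; the exact load width
    // comes from `size_of_vmshared_type_index`):
    //
    //     callee_sig_id = load funcref_ptr + type_index_offset  ; traps if null
    //     matches = icmp eq callee_sig_id, caller_sig_id        ; or a full
    //                                                           ; subtype check
    //                                                           ; with Wasm GC
    //     trapz matches, BAD_SIGNATURE
    //
    // so a null funcref traps while loading its type index, and a live
    // funcref of the wrong type traps on the comparison.
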
    /// Call a typed function reference.
    pub fn call_ref(
        self,
        sig_ref: ir::SigRef,
        callee: ir::Value,
        args: &[ir::Value],
    ) -> WasmResult<CallRets> {
        // FIXME: the wasm type system tracks enough information to know whether
        // `callee` is a null reference or not. In some situations it can be
        // statically known here that `callee` cannot be null, in which case
        // this can be `None` instead. This requires feeding type information
        // from wasmparser's validator into this function, however, which is
        // not easily done at this time.
        let callee_load_trap_code = Some(crate::TRAP_NULL_REFERENCE);

        self.unchecked_call(sig_ref, callee, callee_load_trap_code, args)
    }

    /// This calls a function by reference without checking the signature.
    ///
    /// It gets the function address, sets relevant flags, and passes the
    /// special callee/caller vmctxs. It is used by both call_indirect (which
    /// checks the signature) and call_ref (which doesn't).
    fn unchecked_call(
        mut self,
        sig_ref: ir::SigRef,
        callee: ir::Value,
        callee_load_trap_code: Option<ir::TrapCode>,
        call_args: &[ir::Value],
    ) -> WasmResult<CallRets> {
        let (func_addr, callee_vmctx) = self.load_code_and_vmctx(callee, callee_load_trap_code);
        self.unchecked_call_impl(sig_ref, func_addr, callee_vmctx, call_args)
    }

    fn load_code_and_vmctx(
        &mut self,
        callee: ir::Value,
        callee_load_trap_code: Option<ir::TrapCode>,
    ) -> (ir::Value, ir::Value) {
        let pointer_type = self.env.pointer_type();

        // Dereference the callee pointer to get the function address.
        //
        // Note that this may trap if `callee` hasn't previously been verified
        // to be non-null, so the load is annotated with the optional trap code
        // provided by the caller of `unchecked_call`, covering both the case
        // where the callee is already known to be non-null and the case where
        // this load may trap.
        let mem_flags = ir::MemFlags::trusted().with_readonly();
        let mut callee_flags = mem_flags;
        if self.env.clif_memory_traps_enabled() {
            callee_flags = callee_flags.with_trap_code(callee_load_trap_code);
        } else if let Some(trap) = callee_load_trap_code {
            self.env.trapz(self.builder, callee, trap);
        }
        let func_addr = self.builder.ins().load(
            pointer_type,
            callee_flags,
            callee,
            i32::from(self.env.offsets.ptr.vm_func_ref_wasm_call()),
        );
        let callee_vmctx = self.builder.ins().load(
            pointer_type,
            mem_flags,
            callee,
            i32::from(self.env.offsets.ptr.vm_func_ref_vmctx()),
        );

        (func_addr, callee_vmctx)
    }

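    // Added note: `load_code_and_vmctx` reads two fields of the callee's
    // `VMFuncRef`:
    //
    //     callee + vm_func_ref_wasm_call() -> Wasm-call entry point
    //     callee + vm_func_ref_vmctx()     -> callee vmctx
    //
    // Only the first load carries the optional trap code: if it succeeds,
    // the pointer is known to be non-null, so the second load cannot trap.
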
    fn caller_vmctx(&self) -> ir::Value {
        self.builder
            .func
            .special_param(ArgumentPurpose::VMContext)
            .unwrap()
    }

    /// This calls a function by reference without checking the
    /// signature, given the raw code pointer to the
    /// Wasm-calling-convention entry point and the callee vmctx.
    fn unchecked_call_impl(
        mut self,
        sig_ref: ir::SigRef,
        func_addr: ir::Value,
        callee_vmctx: ir::Value,
        call_args: &[ir::Value],
    ) -> WasmResult<CallRets> {
        let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
        let caller_vmctx = self.caller_vmctx();

        // First append the callee and caller vmctx addresses.
        real_call_args.push(callee_vmctx);
        real_call_args.push(caller_vmctx);

        // Then append the regular call arguments.
        real_call_args.extend_from_slice(call_args);

        Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
    }

    fn exception_table(
        &mut self,
        sig: ir::SigRef,
    ) -> Option<(ir::ExceptionTable, Block, CallRets)> {
        if !self.handlers.is_empty() {
            let continuation_block = self.builder.create_block();
            let mut args = vec![];
            let mut results = smallvec![];
            for i in 0..self.builder.func.dfg.signatures[sig].returns.len() {
                let ty = self.builder.func.dfg.signatures[sig].returns[i].value_type;
                results.push(
                    self.builder
                        .func
                        .dfg
                        .append_block_param(continuation_block, ty),
                );
                args.push(BlockArg::TryCallRet(u32::try_from(i).unwrap()));
            }

            let continuation = self
                .builder
                .func
                .dfg
                .block_call(continuation_block, args.iter());
            let mut handlers = vec![ExceptionTableItem::Context(self.caller_vmctx())];
            for (tag, block) in &self.handlers {
                let block_call = self
                    .builder
                    .func
                    .dfg
                    .block_call(*block, &[BlockArg::TryCallExn(0)]);
                handlers.push(match tag {
                    Some(tag) => ExceptionTableItem::Tag(*tag, block_call),
                    None => ExceptionTableItem::Default(block_call),
                });
            }
            let etd = ExceptionTableData::new(sig, continuation, handlers);
            let et = self.builder.func.dfg.exception_tables.push(etd);
            Some((et, continuation_block, results))
        } else {
            None
        }
    }

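    // Added sketch of the structure `exception_table` builds for a
    // `try_call`:
    //
    //     ExceptionTableData::new(sig, continuation, [
    //         Context(caller_vmctx),
    //         Tag(tag, handler) or Default(handler), ...
    //     ])
    //
    // Normal returns branch to `continuation_block`, whose block params
    // mirror the signature's returns via `BlockArg::TryCallRet(i)`; each
    // handler receives the raw exception payload as `BlockArg::TryCallExn(0)`.
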
    fn results_from_call_inst(&self, inst: ir::Inst) -> CallRets {
        self.builder
            .func
            .dfg
            .inst_results(inst)
            .iter()
            .copied()
            .collect()
    }

    fn handle_call_result_stackmap(&mut self, results: &[ir::Value], sig_ref: ir::SigRef) {
        for (i, &val) in results.iter().enumerate() {
            if self.env.sig_ref_result_needs_stack_map(sig_ref, i) {
                self.builder.declare_value_needs_stack_map(val);
            }
        }
    }

    fn direct_call_inst(&mut self, callee: ir::FuncRef, args: &[ir::Value]) -> CallRets {
        let sig_ref = self.builder.func.dfg.ext_funcs[callee].signature;
        if self.tail {
            self.builder.ins().return_call(callee, args);
            smallvec![]
        } else if let Some((exception_table, continuation_block, results)) =
            self.exception_table(sig_ref)
        {
            self.builder.ins().try_call(callee, args, exception_table);
            self.handle_call_result_stackmap(&results, sig_ref);
            self.builder.switch_to_block(continuation_block);
            self.builder.seal_block(continuation_block);
            results
        } else {
            let inst = self.builder.ins().call(callee, args);
            let results = self.results_from_call_inst(inst);
            self.handle_call_result_stackmap(&results, sig_ref);
            results
        }
    }

    fn indirect_call_inst(
        &mut self,
        sig_ref: ir::SigRef,
        func_addr: ir::Value,
        args: &[ir::Value],
    ) -> CallRets {
        if self.tail {
            self.builder
                .ins()
                .return_call_indirect(sig_ref, func_addr, args);
            smallvec![]
        } else if let Some((exception_table, continuation_block, results)) =
            self.exception_table(sig_ref)
        {
            self.builder
                .ins()
                .try_call_indirect(func_addr, args, exception_table);
            self.handle_call_result_stackmap(&results, sig_ref);
            self.builder.switch_to_block(continuation_block);
            self.builder.seal_block(continuation_block);
            results
        } else {
            let inst = self.builder.ins().call_indirect(sig_ref, func_addr, args);
            let results = self.results_from_call_inst(inst);
            self.handle_call_result_stackmap(&results, sig_ref);
            results
        }
    }
}

impl TypeConvert for FuncEnvironment<'_> {
    fn lookup_heap_type(&self, ty: wasmparser::UnpackedIndex) -> WasmHeapType {
        wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
            self.module.types[idx].unwrap_module_type_index()
        })
        .lookup_heap_type(ty)
    }

    fn lookup_type_index(&self, index: wasmparser::UnpackedIndex) -> EngineOrModuleTypeIndex {
        wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
            self.module.types[idx].unwrap_module_type_index()
        })
        .lookup_type_index(index)
    }
}

impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> {
    fn target_config(&self) -> TargetFrontendConfig {
        self.isa.frontend_config()
    }

    fn reference_type(&self, wasm_ty: WasmHeapType) -> (ir::Type, bool) {
        let ty = crate::reference_type(wasm_ty, self.pointer_type());
        let needs_stack_map = match wasm_ty.top() {
            WasmHeapTopType::Extern | WasmHeapTopType::Any | WasmHeapTopType::Exn => true,
            WasmHeapTopType::Func => false,
            // TODO(#10248) Once continuations can be stored on the GC heap, we
            // will need stack maps for continuation objects.
            WasmHeapTopType::Cont => false,
        };
        (ty, needs_stack_map)
    }

    fn heap_access_spectre_mitigation(&self) -> bool {
        self.isa.flags().enable_heap_access_spectre_mitigation()
    }

    fn proof_carrying_code(&self) -> bool {
        self.isa.flags().enable_pcc()
    }

    fn tunables(&self) -> &Tunables {
        self.compiler.tunables()
    }
}

impl FuncEnvironment<'_> {
    pub fn heaps(&self) -> &PrimaryMap<Heap, HeapData> {
        &self.heaps
    }

    pub fn is_wasm_parameter(&self, _signature: &ir::Signature, index: usize) -> bool {
        // The first two parameters are the vmctx and caller vmctx. The rest are
        // the wasm parameters.
        index >= 2
    }

    pub fn param_needs_stack_map(&self, _signature: &ir::Signature, index: usize) -> bool {
        // Skip the caller and callee vmctx.
        if index < 2 {
            return false;
        }

        self.wasm_func_ty.params()[index - 2].is_vmgcref_type_and_not_i31()
    }

    pub fn sig_ref_result_needs_stack_map(&self, sig_ref: ir::SigRef, index: usize) -> bool {
        let wasm_func_ty = self.sig_ref_to_ty[sig_ref].as_ref().unwrap();
        wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31()
    }

    pub fn translate_table_grow(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        table_index: TableIndex,
        delta: ir::Value,
        init_value: ir::Value,
    ) -> WasmResult<ir::Value> {
        let mut pos = builder.cursor();
        let table = self.table(table_index);
        let ty = table.ref_type.heap_type;
        let (table_vmctx, defined_table_index) =
            self.table_vmctx_and_defined_index(&mut pos, table_index);
        let index_type = table.idx_type;
        let delta = self.cast_index_to_i64(&mut pos, delta, index_type);

        let mut args: SmallVec<[_; 6]> = smallvec![table_vmctx, defined_table_index, delta];
        let grow = match ty.top() {
            WasmHeapTopType::Extern | WasmHeapTopType::Any | WasmHeapTopType::Exn => {
                args.push(init_value);
                gc::builtins::table_grow_gc_ref(self, pos.func)?
            }
            WasmHeapTopType::Func => {
                args.push(init_value);
                self.builtin_functions.table_grow_func_ref(pos.func)
            }
            WasmHeapTopType::Cont => {
                let (revision, contref) =
                    stack_switching::fatpointer::deconstruct(self, &mut pos, init_value);
                args.extend_from_slice(&[contref, revision]);
                stack_switching::builtins::table_grow_cont_obj(self, pos.func)?
            }
        };

        let call_inst = pos.ins().call(grow, &args);
        let result = builder.func.dfg.first_result(call_inst);

        Ok(self.convert_pointer_to_index_type(builder.cursor(), result, index_type, false))
    }

    pub fn translate_table_get(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        index: ir::Value,
    ) -> WasmResult<ir::Value> {
        let table = self.module.tables[table_index];
        let table_data = self.get_or_create_table(builder.func, table_index);
        let heap_ty = table.ref_type.heap_type;
        match heap_ty.top() {
            // GC-managed types.
            WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
                let (src, flags) = table_data.prepare_table_addr(self, builder, index);
                gc::gc_compiler(self)?.translate_read_gc_reference(
                    self,
                    builder,
                    table.ref_type,
                    src,
                    flags,
                )
            }

            // Function types.
            WasmHeapTopType::Func => {
                Ok(self.get_or_init_func_ref_table_elem(builder, table_index, index, false))
            }

            // Continuation types.
            WasmHeapTopType::Cont => {
                let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
                Ok(builder.ins().load(
                    stack_switching::fatpointer::fatpointer_type(self),
                    flags,
                    elem_addr,
                    0,
                ))
            }
        }
    }

    pub fn translate_table_set(
        &mut self,
        builder: &mut FunctionBuilder,
        table_index: TableIndex,
        value: ir::Value,
        index: ir::Value,
    ) -> WasmResult<()> {
        let table = self.module.tables[table_index];
        let table_data = self.get_or_create_table(builder.func, table_index);
        let heap_ty = table.ref_type.heap_type;
        match heap_ty.top() {
            // GC-managed types.
            WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
                let (dst, flags) = table_data.prepare_table_addr(self, builder, index);
                gc::gc_compiler(self)?.translate_write_gc_reference(
                    self,
                    builder,
                    table.ref_type,
                    dst,
                    value,
                    flags,
                )
            }

            // Function types.
            WasmHeapTopType::Func => {
                let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
                // Set the "initialized bit". See doc-comment on
                // `FUNCREF_INIT_BIT` in
                // crates/environ/src/ref_bits.rs for details.
                let value_with_init_bit = if self.tunables.table_lazy_init {
                    builder
                        .ins()
                        .bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64))
                } else {
                    value
                };
                builder
                    .ins()
                    .store(flags, value_with_init_bit, elem_addr, 0);
                Ok(())
            }

            // Continuation types.
            WasmHeapTopType::Cont => {
                let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
                builder.ins().store(flags, value, elem_addr, 0);
                Ok(())
            }
        }
    }

    pub fn translate_table_fill(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        table_index: TableIndex,
        dst: ir::Value,
        val: ir::Value,
        len: ir::Value,
    ) -> WasmResult<()> {
        let mut pos = builder.cursor();
        let table = self.table(table_index);
        let ty = table.ref_type.heap_type;
        let dst = self.cast_index_to_i64(&mut pos, dst, table.idx_type);
        let len = self.cast_index_to_i64(&mut pos, len, table.idx_type);
        let (table_vmctx, table_index) = self.table_vmctx_and_defined_index(&mut pos, table_index);

        let mut args: SmallVec<[_; 6]> = smallvec![table_vmctx, table_index, dst];
        let libcall = match ty.top() {
            WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
                args.push(val);
                gc::builtins::table_fill_gc_ref(self, &mut pos.func)?
            }
            WasmHeapTopType::Func => {
                args.push(val);
                self.builtin_functions.table_fill_func_ref(&mut pos.func)
            }
            WasmHeapTopType::Cont => {
                let (revision, contref) =
                    stack_switching::fatpointer::deconstruct(self, &mut pos, val);
                args.extend_from_slice(&[contref, revision]);
                stack_switching::builtins::table_fill_cont_obj(self, &mut pos.func)?
            }
        };

        args.push(len);
        builder.ins().call(libcall, &args);

        Ok(())
    }

    pub fn translate_ref_i31(
        &mut self,
        mut pos: FuncCursor,
        val: ir::Value,
    ) -> WasmResult<ir::Value> {
        debug_assert_eq!(pos.func.dfg.value_type(val), ir::types::I32);
        let shifted = pos.ins().ishl_imm(val, 1);
        let tagged = pos
            .ins()
            .bor_imm(shifted, i64::from(crate::I31_REF_DISCRIMINANT));
        let (ref_ty, _needs_stack_map) = self.reference_type(WasmHeapType::I31);
        debug_assert_eq!(ref_ty, ir::types::I32);
        Ok(tagged)
    }

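    // Worked example (added): `ref.i31` tags the payload with the low-bit
    // discriminant, so for `val = 5`:
    //
    //     shifted = 5 << 1            = 0b1010
    //     tagged  = 0b1010 | 0b0001   = 0b1011
    //
    // and `i31.get_s`/`i31.get_u` below undo it with an arithmetic or
    // logical shift right by one (after checking for null, which is
    // all-zeros and thus never has the discriminant bit set).
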
    pub fn translate_i31_get_s(
        &mut self,
        builder: &mut FunctionBuilder,
        i31ref: ir::Value,
    ) -> WasmResult<ir::Value> {
        // TODO: If we knew we had a `(ref i31)` here, instead of maybe a `(ref
        // null i31)`, we could omit the `trapz`. But plumbing that type info
        // from `wasmparser` through to here is a bit funky.
        self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
        Ok(builder.ins().sshr_imm(i31ref, 1))
    }

    pub fn translate_i31_get_u(
        &mut self,
        builder: &mut FunctionBuilder,
        i31ref: ir::Value,
    ) -> WasmResult<ir::Value> {
        // TODO: If we knew we had a `(ref i31)` here, instead of maybe a `(ref
        // null i31)`, we could omit the `trapz`. But plumbing that type info
        // from `wasmparser` through to here is a bit funky.
        self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
        Ok(builder.ins().ushr_imm(i31ref, 1))
    }

2539    pub fn struct_fields_len(&mut self, struct_type_index: TypeIndex) -> WasmResult<usize> {
2540        let ty = self.module.types[struct_type_index].unwrap_module_type_index();
2541        match &self.types[ty].composite_type.inner {
2542            WasmCompositeInnerType::Struct(s) => Ok(s.fields.len()),
2543            _ => unreachable!(),
2544        }
2545    }
2546
2547    pub fn translate_struct_new(
2548        &mut self,
2549        builder: &mut FunctionBuilder,
2550        struct_type_index: TypeIndex,
2551        fields: StructFieldsVec,
2552    ) -> WasmResult<ir::Value> {
2553        gc::translate_struct_new(self, builder, struct_type_index, &fields)
2554    }
2555
2556    pub fn translate_struct_new_default(
2557        &mut self,
2558        builder: &mut FunctionBuilder,
2559        struct_type_index: TypeIndex,
2560    ) -> WasmResult<ir::Value> {
2561        gc::translate_struct_new_default(self, builder, struct_type_index)
2562    }
2563
2564    pub fn translate_struct_get(
2565        &mut self,
2566        builder: &mut FunctionBuilder,
2567        struct_type_index: TypeIndex,
2568        field_index: u32,
2569        struct_ref: ir::Value,
2570        extension: Option<Extension>,
2571    ) -> WasmResult<ir::Value> {
2572        gc::translate_struct_get(
2573            self,
2574            builder,
2575            struct_type_index,
2576            field_index,
2577            struct_ref,
2578            extension,
2579        )
2580    }
2581
2582    pub fn translate_struct_set(
2583        &mut self,
2584        builder: &mut FunctionBuilder,
2585        struct_type_index: TypeIndex,
2586        field_index: u32,
2587        struct_ref: ir::Value,
2588        value: ir::Value,
2589    ) -> WasmResult<()> {
2590        gc::translate_struct_set(
2591            self,
2592            builder,
2593            struct_type_index,
2594            field_index,
2595            struct_ref,
2596            value,
2597        )
2598    }
2599
2600    pub fn translate_exn_unbox(
2601        &mut self,
2602        builder: &mut FunctionBuilder<'_>,
2603        tag_index: TagIndex,
2604        exn_ref: ir::Value,
2605    ) -> WasmResult<SmallVec<[ir::Value; 4]>> {
2606        gc::translate_exn_unbox(self, builder, tag_index, exn_ref)
2607    }
2608
2609    pub fn translate_exn_throw(
2610        &mut self,
2611        builder: &mut FunctionBuilder<'_>,
2612        tag_index: TagIndex,
2613        args: &[ir::Value],
2614        handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
2615    ) -> WasmResult<()> {
2616        gc::translate_exn_throw(self, builder, tag_index, args, handlers)
2617    }
2618
2619    pub fn translate_exn_throw_ref(
2620        &mut self,
2621        builder: &mut FunctionBuilder<'_>,
2622        exnref: ir::Value,
2623        handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
2624    ) -> WasmResult<()> {
2625        gc::translate_exn_throw_ref(self, builder, exnref, handlers)
2626    }
2627
2628    pub fn translate_array_new(
2629        &mut self,
2630        builder: &mut FunctionBuilder,
2631        array_type_index: TypeIndex,
2632        elem: ir::Value,
2633        len: ir::Value,
2634    ) -> WasmResult<ir::Value> {
2635        gc::translate_array_new(self, builder, array_type_index, elem, len)
2636    }
2637
2638    pub fn translate_array_new_default(
2639        &mut self,
2640        builder: &mut FunctionBuilder,
2641        array_type_index: TypeIndex,
2642        len: ir::Value,
2643    ) -> WasmResult<ir::Value> {
2644        gc::translate_array_new_default(self, builder, array_type_index, len)
2645    }
2646
2647    pub fn translate_array_new_fixed(
2648        &mut self,
2649        builder: &mut FunctionBuilder,
2650        array_type_index: TypeIndex,
2651        elems: &[ir::Value],
2652    ) -> WasmResult<ir::Value> {
2653        gc::translate_array_new_fixed(self, builder, array_type_index, elems)
2654    }
2655
2656    pub fn translate_array_new_data(
2657        &mut self,
2658        builder: &mut FunctionBuilder,
2659        array_type_index: TypeIndex,
2660        data_index: DataIndex,
2661        data_offset: ir::Value,
2662        len: ir::Value,
2663    ) -> WasmResult<ir::Value> {
2664        let libcall = gc::builtins::array_new_data(self, builder.func)?;
2665        let vmctx = self.vmctx_val(&mut builder.cursor());
2666        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2667        let interned_type_index = builder
2668            .ins()
2669            .iconst(I32, i64::from(interned_type_index.as_u32()));
2670        let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
2671        let call_inst = builder.ins().call(
2672            libcall,
2673            &[vmctx, interned_type_index, data_index, data_offset, len],
2674        );
2675        Ok(builder.func.dfg.first_result(call_inst))
2676    }
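
    // Like `array.new_elem` below, `array.new_data` is implemented entirely
    // as a libcall rather than inline code: the runtime has to look up the
    // data segment, bounds-check the offset/length pair, and allocate the
    // new array, all of which is much easier to do out of line.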
2677
2678    pub fn translate_array_new_elem(
2679        &mut self,
2680        builder: &mut FunctionBuilder,
2681        array_type_index: TypeIndex,
2682        elem_index: ElemIndex,
2683        elem_offset: ir::Value,
2684        len: ir::Value,
2685    ) -> WasmResult<ir::Value> {
2686        let libcall = gc::builtins::array_new_elem(self, builder.func)?;
2687        let vmctx = self.vmctx_val(&mut builder.cursor());
2688        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2689        let interned_type_index = builder
2690            .ins()
2691            .iconst(I32, i64::from(interned_type_index.as_u32()));
2692        let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
2693        let call_inst = builder.ins().call(
2694            libcall,
2695            &[vmctx, interned_type_index, elem_index, elem_offset, len],
2696        );
2697        Ok(builder.func.dfg.first_result(call_inst))
2698    }
2699
2700    pub fn translate_array_copy(
2701        &mut self,
2702        builder: &mut FunctionBuilder,
2703        _dst_array_type_index: TypeIndex,
2704        dst_array: ir::Value,
2705        dst_index: ir::Value,
2706        _src_array_type_index: TypeIndex,
2707        src_array: ir::Value,
2708        src_index: ir::Value,
2709        len: ir::Value,
2710    ) -> WasmResult<()> {
2711        let libcall = gc::builtins::array_copy(self, builder.func)?;
2712        let vmctx = self.vmctx_val(&mut builder.cursor());
2713        builder.ins().call(
2714            libcall,
2715            &[vmctx, dst_array, dst_index, src_array, src_index, len],
2716        );
2717        Ok(())
2718    }
2719
2720    pub fn translate_array_fill(
2721        &mut self,
2722        builder: &mut FunctionBuilder,
2723        array_type_index: TypeIndex,
2724        array: ir::Value,
2725        index: ir::Value,
2726        value: ir::Value,
2727        len: ir::Value,
2728    ) -> WasmResult<()> {
2729        gc::translate_array_fill(self, builder, array_type_index, array, index, value, len)
2730    }
2731
2732    pub fn translate_array_init_data(
2733        &mut self,
2734        builder: &mut FunctionBuilder,
2735        array_type_index: TypeIndex,
2736        array: ir::Value,
2737        dst_index: ir::Value,
2738        data_index: DataIndex,
2739        data_offset: ir::Value,
2740        len: ir::Value,
2741    ) -> WasmResult<()> {
2742        let libcall = gc::builtins::array_init_data(self, builder.func)?;
2743        let vmctx = self.vmctx_val(&mut builder.cursor());
2744        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2745        let interned_type_index = builder
2746            .ins()
2747            .iconst(I32, i64::from(interned_type_index.as_u32()));
2748        let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
2749        builder.ins().call(
2750            libcall,
2751            &[
2752                vmctx,
2753                interned_type_index,
2754                array,
2755                dst_index,
2756                data_index,
2757                data_offset,
2758                len,
2759            ],
2760        );
2761        Ok(())
2762    }
2763
2764    pub fn translate_array_init_elem(
2765        &mut self,
2766        builder: &mut FunctionBuilder,
2767        array_type_index: TypeIndex,
2768        array: ir::Value,
2769        dst_index: ir::Value,
2770        elem_index: ElemIndex,
2771        elem_offset: ir::Value,
2772        len: ir::Value,
2773    ) -> WasmResult<()> {
2774        let libcall = gc::builtins::array_init_elem(self, builder.func)?;
2775        let vmctx = self.vmctx_val(&mut builder.cursor());
2776        let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
2777        let interned_type_index = builder
2778            .ins()
2779            .iconst(I32, i64::from(interned_type_index.as_u32()));
2780        let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
2781        builder.ins().call(
2782            libcall,
2783            &[
2784                vmctx,
2785                interned_type_index,
2786                array,
2787                dst_index,
2788                elem_index,
2789                elem_offset,
2790                len,
2791            ],
2792        );
2793        Ok(())
2794    }
2795
2796    pub fn translate_array_len(
2797        &mut self,
2798        builder: &mut FunctionBuilder,
2799        array: ir::Value,
2800    ) -> WasmResult<ir::Value> {
2801        gc::translate_array_len(self, builder, array)
2802    }
2803
2804    pub fn translate_array_get(
2805        &mut self,
2806        builder: &mut FunctionBuilder,
2807        array_type_index: TypeIndex,
2808        array: ir::Value,
2809        index: ir::Value,
2810        extension: Option<Extension>,
2811    ) -> WasmResult<ir::Value> {
2812        gc::translate_array_get(self, builder, array_type_index, array, index, extension)
2813    }
2814
2815    pub fn translate_array_set(
2816        &mut self,
2817        builder: &mut FunctionBuilder,
2818        array_type_index: TypeIndex,
2819        array: ir::Value,
2820        index: ir::Value,
2821        value: ir::Value,
2822    ) -> WasmResult<()> {
2823        gc::translate_array_set(self, builder, array_type_index, array, index, value)
2824    }
2825
2826    pub fn translate_ref_test(
2827        &mut self,
2828        builder: &mut FunctionBuilder<'_>,
2829        test_ty: WasmRefType,
2830        gc_ref: ir::Value,
2831        gc_ref_ty: WasmRefType,
2832    ) -> WasmResult<ir::Value> {
2833        gc::translate_ref_test(self, builder, test_ty, gc_ref, gc_ref_ty)
2834    }
2835
2836    pub fn translate_ref_null(
2837        &mut self,
2838        mut pos: cranelift_codegen::cursor::FuncCursor,
2839        ht: WasmHeapType,
2840    ) -> WasmResult<ir::Value> {
2841        Ok(match ht.top() {
2842            WasmHeapTopType::Func => pos.ins().iconst(self.pointer_type(), 0),
2843            // NB: null GC references don't need to be in stack maps.
2844            WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
2845                pos.ins().iconst(types::I32, 0)
2846            }
2847            WasmHeapTopType::Cont => {
2848                let zero = pos.ins().iconst(self.pointer_type(), 0);
2849                stack_switching::fatpointer::construct(self, &mut pos, zero, zero)
2850            }
2851        })
2852    }
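
    // Continuation references are represented as "fat pointers" (a contref
    // pointer paired with a revision counter, per the `fatpointer` helpers
    // used above), so their null value is a pair of zeros rather than the
    // single zero used for the other reference types.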
2853
2854    pub fn translate_ref_is_null(
2855        &mut self,
2856        mut pos: cranelift_codegen::cursor::FuncCursor,
2857        value: ir::Value,
2858        ty: WasmRefType,
2859    ) -> WasmResult<ir::Value> {
2860        // If we know the type is not nullable, then we don't actually need to
2861        // check for null.
2862        if !ty.nullable {
2863            return Ok(pos.ins().iconst(ir::types::I32, 0));
2864        }
2865
2866        let byte_is_null = match ty.heap_type.top() {
2867            WasmHeapTopType::Cont => {
2868                let (_revision, contref) =
2869                    stack_switching::fatpointer::deconstruct(self, &mut pos, value);
2870                pos.ins()
2871                    .icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, contref, 0)
2872            }
2873            _ => pos
2874                .ins()
2875                .icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, value, 0),
2876        };
2877
2878        Ok(pos.ins().uextend(ir::types::I32, byte_is_null))
2879    }
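
    // For a nullable, non-continuation reference the sequence emitted above
    // is roughly the following CLIF (a sketch; value numbers and the
    // reference's type vary):
    //
    //   v1 = icmp_imm eq v0, 0
    //   v2 = uextend.i32 v1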
2880
2881    pub fn translate_ref_func(
2882        &mut self,
2883        mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
2884        func_index: FuncIndex,
2885    ) -> WasmResult<ir::Value> {
2886        let func_index = pos.ins().iconst(I32, func_index.as_u32() as i64);
2887        let ref_func = self.builtin_functions.ref_func(&mut pos.func);
2888        let vmctx = self.vmctx_val(&mut pos);
2889
2890        let call_inst = pos.ins().call(ref_func, &[vmctx, func_index]);
2891        Ok(pos.func.dfg.first_result(call_inst))
2892    }
2893
2894    pub(crate) fn translate_global_get(
2895        &mut self,
2896        builder: &mut FunctionBuilder<'_>,
2897        global_index: GlobalIndex,
2898    ) -> WasmResult<ir::Value> {
2899        match self.get_or_create_global(builder.func, global_index) {
2900            GlobalVariable::Memory { gv, offset, ty } => {
2901                let addr = builder.ins().global_value(self.pointer_type(), gv);
2902                let mut flags = ir::MemFlags::trusted();
2903                // Vector globals are stored in little-endian format to avoid
2904                // byte swaps on big-endian platforms, since at-rest vectors
2905                // should already be in little-endian format anyway.
2906                if ty.is_vector() {
2907                    flags.set_endianness(ir::Endianness::Little);
2908                }
2909                // Put globals in the "table" abstract heap category as well.
2910                flags.set_alias_region(Some(ir::AliasRegion::Table));
2911                Ok(builder.ins().load(ty, flags, addr, offset))
2912            }
2913            GlobalVariable::Custom => {
2914                let global_ty = self.module.globals[global_index];
2915                let wasm_ty = global_ty.wasm_ty;
2916                debug_assert!(
2917                    wasm_ty.is_vmgcref_type(),
2918                    "We only use GlobalVariable::Custom for VMGcRef types"
2919                );
2920                let WasmValType::Ref(ref_ty) = wasm_ty else {
2921                    unreachable!()
2922                };
2923
2924                let (gv, offset) = self.get_global_location(builder.func, global_index);
2925                let gv = builder.ins().global_value(self.pointer_type(), gv);
2926                let src = builder.ins().iadd_imm(gv, i64::from(offset));
2927
2928                gc::gc_compiler(self)?.translate_read_gc_reference(
2929                    self,
2930                    builder,
2931                    ref_ty,
2932                    src,
2933                    if global_ty.mutability {
2934                        ir::MemFlags::trusted()
2935                    } else {
2936                        ir::MemFlags::trusted().with_readonly().with_can_move()
2937                    },
2938                )
2939            }
2940        }
2941    }
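
    // A sketch of the `GlobalVariable::Memory` path above, assuming an
    // `i32` global on a 64-bit target (value numbers and the offset are
    // illustrative):
    //
    //   v0 = global_value.i64 gv0         ;; address of the global's storage
    //   v1 = load.i32 notrap aligned table v0+8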
2942
2943    pub(crate) fn translate_global_set(
2944        &mut self,
2945        builder: &mut FunctionBuilder<'_>,
2946        global_index: GlobalIndex,
2947        val: ir::Value,
2948    ) -> WasmResult<()> {
2949        match self.get_or_create_global(builder.func, global_index) {
2950            GlobalVariable::Memory { gv, offset, ty } => {
2951                let addr = builder.ins().global_value(self.pointer_type(), gv);
2952                let mut flags = ir::MemFlags::trusted();
2953                // Like `global.get`, store globals in little-endian format.
2954                if ty.is_vector() {
2955                    flags.set_endianness(ir::Endianness::Little);
2956                }
2957                // Put globals in the "table" abstract heap category as well.
2958                flags.set_alias_region(Some(ir::AliasRegion::Table));
2959                debug_assert_eq!(ty, builder.func.dfg.value_type(val));
2960                builder.ins().store(flags, val, addr, offset);
2961                self.update_global(builder, global_index, val);
2962            }
2963            GlobalVariable::Custom => {
2964                let ty = self.module.globals[global_index].wasm_ty;
2965                debug_assert!(
2966                    ty.is_vmgcref_type(),
2967                    "We only use GlobalVariable::Custom for VMGcRef types"
2968                );
2969                let WasmValType::Ref(ty) = ty else {
2970                    unreachable!()
2971                };
2972
2973                let (gv, offset) = self.get_global_location(builder.func, global_index);
2974                let gv = builder.ins().global_value(self.pointer_type(), gv);
2975                let src = builder.ins().iadd_imm(gv, i64::from(offset));
2976
2977                gc::gc_compiler(self)?.translate_write_gc_reference(
2978                    self,
2979                    builder,
2980                    ty,
2981                    src,
2982                    val,
2983                    ir::MemFlags::trusted(),
2984                )?
2985            }
2986        }
2987        Ok(())
2988    }
2989
2990    pub fn translate_call_indirect<'a>(
2991        &mut self,
2992        builder: &'a mut FunctionBuilder,
2993        features: &WasmFeatures,
2994        table_index: TableIndex,
2995        ty_index: TypeIndex,
2996        sig_ref: ir::SigRef,
2997        callee: ir::Value,
2998        call_args: &[ir::Value],
2999        handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
3000    ) -> WasmResult<Option<CallRets>> {
3001        Call::new(builder, self, handlers).indirect_call(
3002            features,
3003            table_index,
3004            ty_index,
3005            sig_ref,
3006            callee,
3007            call_args,
3008        )
3009    }
3010
3011    pub fn translate_call<'a>(
3012        &mut self,
3013        builder: &'a mut FunctionBuilder,
3014        callee_index: FuncIndex,
3015        sig_ref: ir::SigRef,
3016        call_args: &[ir::Value],
3017        handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
3018    ) -> WasmResult<CallRets> {
3019        Call::new(builder, self, handlers).direct_call(callee_index, sig_ref, call_args)
3020    }
3021
3022    pub fn translate_call_ref<'a>(
3023        &mut self,
3024        builder: &'a mut FunctionBuilder,
3025        sig_ref: ir::SigRef,
3026        callee: ir::Value,
3027        call_args: &[ir::Value],
3028        handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
3029    ) -> WasmResult<CallRets> {
3030        Call::new(builder, self, handlers).call_ref(sig_ref, callee, call_args)
3031    }
3032
3033    pub fn translate_return_call(
3034        &mut self,
3035        builder: &mut FunctionBuilder,
3036        callee_index: FuncIndex,
3037        sig_ref: ir::SigRef,
3038        call_args: &[ir::Value],
3039    ) -> WasmResult<()> {
3040        Call::new_tail(builder, self).direct_call(callee_index, sig_ref, call_args)?;
3041        Ok(())
3042    }
3043
3044    pub fn translate_return_call_indirect(
3045        &mut self,
3046        builder: &mut FunctionBuilder,
3047        features: &WasmFeatures,
3048        table_index: TableIndex,
3049        ty_index: TypeIndex,
3050        sig_ref: ir::SigRef,
3051        callee: ir::Value,
3052        call_args: &[ir::Value],
3053    ) -> WasmResult<()> {
3054        Call::new_tail(builder, self).indirect_call(
3055            features,
3056            table_index,
3057            ty_index,
3058            sig_ref,
3059            callee,
3060            call_args,
3061        )?;
3062        Ok(())
3063    }
3064
3065    pub fn translate_return_call_ref(
3066        &mut self,
3067        builder: &mut FunctionBuilder,
3068        sig_ref: ir::SigRef,
3069        callee: ir::Value,
3070        call_args: &[ir::Value],
3071    ) -> WasmResult<()> {
3072        Call::new_tail(builder, self).call_ref(sig_ref, callee, call_args)?;
3073        Ok(())
3074    }
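
    // The three `translate_return_call*` helpers above all go through
    // `Call::new_tail`, which lowers to Cranelift's `return_call` family of
    // instructions, so there are no return values to forward here: the
    // callee returns directly to our caller.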
3075
3076    /// Returns two `ir::Value`s, the first of which is the vmctx for the memory
3077    /// `index` and the second of which is the `DefinedMemoryIndex` for `index`.
3078    ///
3079    /// Handles internally whether `index` is an imported memory or not.
3080    fn memory_vmctx_and_defined_index(
3081        &mut self,
3082        pos: &mut FuncCursor,
3083        index: MemoryIndex,
3084    ) -> (ir::Value, ir::Value) {
3085        let cur_vmctx = self.vmctx_val(pos);
3086        match self.module.defined_memory_index(index) {
3087            // This is a defined memory, so the vmctx is our own and the defined
3088            // index is `index` here.
3089            Some(index) => (cur_vmctx, pos.ins().iconst(I32, i64::from(index.as_u32()))),
3090
3091            // This is an imported memory, so load the vmctx/defined index from
3092            // the import definition itself.
3093            None => {
3094                let vmimport = self.offsets.vmctx_vmmemory_import(index);
3095
3096                let vmctx = pos.ins().load(
3097                    self.isa.pointer_type(),
3098                    ir::MemFlags::trusted(),
3099                    cur_vmctx,
3100                    i32::try_from(vmimport + u32::from(self.offsets.vmmemory_import_vmctx()))
3101                        .unwrap(),
3102                );
3103                let index = pos.ins().load(
3104                    ir::types::I32,
3105                    ir::MemFlags::trusted(),
3106                    cur_vmctx,
3107                    i32::try_from(vmimport + u32::from(self.offsets.vmmemory_import_index()))
3108                        .unwrap(),
3109                );
3110                (vmctx, index)
3111            }
3112        }
3113    }
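
    // Conceptually, the imported-memory case above does the following, with
    // the actual field offsets supplied by `VMOffsets` (a sketch only; the
    // `VMMemoryImport` field names are the runtime's, the expression form is
    // illustrative):
    //
    //   let import: &VMMemoryImport = /* at `vmctx_vmmemory_import(index)` */;
    //   (import.vmctx, import.index)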
3114
3115    /// Returns two `ir::Value`s, the first of which is the vmctx for the table
3116    /// `index` and the second of which is the `DefinedTableIndex` for `index`.
3117    ///
3118    /// Handles internally whether `index` is an imported table or not.
3119    fn table_vmctx_and_defined_index(
3120        &mut self,
3121        pos: &mut FuncCursor,
3122        index: TableIndex,
3123    ) -> (ir::Value, ir::Value) {
3124        // NB: the body of this method is similar to
3125        // `memory_vmctx_and_defined_index` above.
3126        let cur_vmctx = self.vmctx_val(pos);
3127        match self.module.defined_table_index(index) {
3128            Some(index) => (cur_vmctx, pos.ins().iconst(I32, i64::from(index.as_u32()))),
3129            None => {
3130                let vmimport = self.offsets.vmctx_vmtable_import(index);
3131
3132                let vmctx = pos.ins().load(
3133                    self.isa.pointer_type(),
3134                    ir::MemFlags::trusted(),
3135                    cur_vmctx,
3136                    i32::try_from(vmimport + u32::from(self.offsets.vmtable_import_vmctx()))
3137                        .unwrap(),
3138                );
3139                let index = pos.ins().load(
3140                    ir::types::I32,
3141                    ir::MemFlags::trusted(),
3142                    cur_vmctx,
3143                    i32::try_from(vmimport + u32::from(self.offsets.vmtable_import_index()))
3144                        .unwrap(),
3145                );
3146                (vmctx, index)
3147            }
3148        }
3149    }
3150
3151    pub fn translate_memory_grow(
3152        &mut self,
3153        builder: &mut FunctionBuilder<'_>,
3154        index: MemoryIndex,
3155        val: ir::Value,
3156    ) -> WasmResult<ir::Value> {
3157        let mut pos = builder.cursor();
3158        let memory_grow = self.builtin_functions.memory_grow(&mut pos.func);
3159
3160        let (memory_vmctx, defined_memory_index) =
3161            self.memory_vmctx_and_defined_index(&mut pos, index);
3162
3163        let index_type = self.memory(index).idx_type;
3164        let val = self.cast_index_to_i64(&mut pos, val, index_type);
3165        let call_inst = pos
3166            .ins()
3167            .call(memory_grow, &[memory_vmctx, val, defined_memory_index]);
3168        let result = *pos.func.dfg.inst_results(call_inst).first().unwrap();
3169        let single_byte_pages = match self.memory(index).page_size_log2 {
3170            16 => false,
3171            0 => true,
3172            _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
3173        };
3174        Ok(self.convert_pointer_to_index_type(
3175            builder.cursor(),
3176            result,
3177            index_type,
3178            single_byte_pages,
3179        ))
3180    }
3181
3182    pub fn translate_memory_size(
3183        &mut self,
3184        mut pos: FuncCursor<'_>,
3185        index: MemoryIndex,
3186    ) -> WasmResult<ir::Value> {
3187        let pointer_type = self.pointer_type();
3188        let vmctx = self.vmctx(&mut pos.func);
3189        let is_shared = self.module.memories[index].shared;
3190        let base = pos.ins().global_value(pointer_type, vmctx);
3191        let current_length_in_bytes = match self.module.defined_memory_index(index) {
3192            Some(def_index) => {
3193                if is_shared {
3194                    let offset =
3195                        i32::try_from(self.offsets.vmctx_vmmemory_pointer(def_index)).unwrap();
3196                    let vmmemory_ptr =
3197                        pos.ins()
3198                            .load(pointer_type, ir::MemFlags::trusted(), base, offset);
3199                    let vmmemory_definition_offset =
3200                        i64::from(self.offsets.ptr.vmmemory_definition_current_length());
3201                    let vmmemory_definition_ptr =
3202                        pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
3203                    // This atomic access of
3204                    // `VMMemoryDefinition::current_length` is direct; no bounds
3205                    // check is needed. This is possible because shared memory
3206                    // has a static size (the maximum is always known), so it is
3207                    // built with a static memory plan and no bounds-checked
3208                    // version of this access is implemented.
3209                    pos.ins().atomic_load(
3210                        pointer_type,
3211                        ir::MemFlags::trusted(),
3212                        vmmemory_definition_ptr,
3213                    )
3214                } else {
3215                    let owned_index = self.module.owned_memory_index(def_index);
3216                    let offset = i32::try_from(
3217                        self.offsets
3218                            .vmctx_vmmemory_definition_current_length(owned_index),
3219                    )
3220                    .unwrap();
3221                    pos.ins()
3222                        .load(pointer_type, ir::MemFlags::trusted(), base, offset)
3223                }
3224            }
3225            None => {
3226                let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
3227                let vmmemory_ptr =
3228                    pos.ins()
3229                        .load(pointer_type, ir::MemFlags::trusted(), base, offset);
3230                if is_shared {
3231                    let vmmemory_definition_offset =
3232                        i64::from(self.offsets.ptr.vmmemory_definition_current_length());
3233                    let vmmemory_definition_ptr =
3234                        pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
3235                    pos.ins().atomic_load(
3236                        pointer_type,
3237                        ir::MemFlags::trusted(),
3238                        vmmemory_definition_ptr,
3239                    )
3240                } else {
3241                    pos.ins().load(
3242                        pointer_type,
3243                        ir::MemFlags::trusted(),
3244                        vmmemory_ptr,
3245                        i32::from(self.offsets.ptr.vmmemory_definition_current_length()),
3246                    )
3247                }
3248            }
3249        };
3250
3251        let page_size_log2 = i64::from(self.module.memories[index].page_size_log2);
3252        let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2);
3253        let single_byte_pages = match page_size_log2 {
3254            16 => false,
3255            0 => true,
3256            _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
3257        };
3258        Ok(self.convert_pointer_to_index_type(
3259            pos,
3260            current_length_in_pages,
3261            self.memory(index).idx_type,
3262            single_byte_pages,
3263        ))
3264    }
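
    // Worked example for the conversion above: with the default 64 KiB page
    // size (`page_size_log2 == 16`) a current length of 131072 bytes yields
    // `131072 >> 16 == 2` pages, while with single-byte pages
    // (`page_size_log2 == 0`) the byte length is already the page count.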
3265
3266    pub fn translate_memory_copy(
3267        &mut self,
3268        builder: &mut FunctionBuilder<'_>,
3269        src_index: MemoryIndex,
3270        dst_index: MemoryIndex,
3271        dst: ir::Value,
3272        src: ir::Value,
3273        len: ir::Value,
3274    ) -> WasmResult<()> {
3275        let mut pos = builder.cursor();
3276        let vmctx = self.vmctx_val(&mut pos);
3277
3278        let memory_copy = self.builtin_functions.memory_copy(&mut pos.func);
3279        let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(dst_index).idx_type);
3280        let src = self.cast_index_to_i64(&mut pos, src, self.memory(src_index).idx_type);
3281        // The length is 32-bit if either memory is 32-bit, and 64-bit only when
3282        // both memories are 64-bit. Our intrinsic takes a 64-bit length for
3283        // compatibility across all memories, so make sure it's cast correctly
3284        // here (this case is special enough that there is no generic helper,
3285        // unlike for `dst`/`src` above).
3286        let len = if index_type_to_ir_type(self.memory(dst_index).idx_type) == I64
3287            && index_type_to_ir_type(self.memory(src_index).idx_type) == I64
3288        {
3289            len
3290        } else {
3291            pos.ins().uextend(I64, len)
3292        };
3293        let src_index = pos.ins().iconst(I32, i64::from(src_index.as_u32()));
3294        let dst_index = pos.ins().iconst(I32, i64::from(dst_index.as_u32()));
3295        pos.ins()
3296            .call(memory_copy, &[vmctx, dst_index, dst, src_index, src, len]);
3297
3298        Ok(())
3299    }
3300
3301    pub fn translate_memory_fill(
3302        &mut self,
3303        builder: &mut FunctionBuilder<'_>,
3304        memory_index: MemoryIndex,
3305        dst: ir::Value,
3306        val: ir::Value,
3307        len: ir::Value,
3308    ) -> WasmResult<()> {
3309        let mut pos = builder.cursor();
3310        let memory_fill = self.builtin_functions.memory_fill(&mut pos.func);
3311        let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);
3312        let len = self.cast_index_to_i64(&mut pos, len, self.memory(memory_index).idx_type);
3313        let (memory_vmctx, defined_memory_index) =
3314            self.memory_vmctx_and_defined_index(&mut pos, memory_index);
3315
3316        pos.ins().call(
3317            memory_fill,
3318            &[memory_vmctx, defined_memory_index, dst, val, len],
3319        );
3320
3321        Ok(())
3322    }
3323
3324    pub fn translate_memory_init(
3325        &mut self,
3326        builder: &mut FunctionBuilder<'_>,
3327        memory_index: MemoryIndex,
3328        seg_index: u32,
3329        dst: ir::Value,
3330        src: ir::Value,
3331        len: ir::Value,
3332    ) -> WasmResult<()> {
3333        let mut pos = builder.cursor();
3334        let memory_init = self.builtin_functions.memory_init(&mut pos.func);
3335
3336        let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
3337        let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
3338
3339        let vmctx = self.vmctx_val(&mut pos);
3340
3341        let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);
3342
3343        pos.ins().call(
3344            memory_init,
3345            &[vmctx, memory_index_arg, seg_index_arg, dst, src, len],
3346        );
3347
3348        Ok(())
3349    }
3350
3351    pub fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> {
3352        let data_drop = self.builtin_functions.data_drop(&mut pos.func);
3353        let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
3354        let vmctx = self.vmctx_val(&mut pos);
3355        pos.ins().call(data_drop, &[vmctx, seg_index_arg]);
3356        Ok(())
3357    }
3358
3359    pub fn translate_table_size(
3360        &mut self,
3361        pos: FuncCursor,
3362        table_index: TableIndex,
3363    ) -> WasmResult<ir::Value> {
3364        let table_data = self.get_or_create_table(pos.func, table_index);
3365        let index_type = index_type_to_ir_type(self.table(table_index).idx_type);
3366        Ok(table_data.bound.bound(&*self.isa, pos, index_type))
3367    }
3368
3369    pub fn translate_table_copy(
3370        &mut self,
3371        builder: &mut FunctionBuilder<'_>,
3372        dst_table_index: TableIndex,
3373        src_table_index: TableIndex,
3374        dst: ir::Value,
3375        src: ir::Value,
3376        len: ir::Value,
3377    ) -> WasmResult<()> {
3378        let (table_copy, dst_table_index_arg, src_table_index_arg) =
3379            self.get_table_copy_func(&mut builder.func, dst_table_index, src_table_index);
3380
3381        let mut pos = builder.cursor();
3382        let dst = self.cast_index_to_i64(&mut pos, dst, self.table(dst_table_index).idx_type);
3383        let src = self.cast_index_to_i64(&mut pos, src, self.table(src_table_index).idx_type);
3384        let len = if index_type_to_ir_type(self.table(dst_table_index).idx_type) == I64
3385            && index_type_to_ir_type(self.table(src_table_index).idx_type) == I64
3386        {
3387            len
3388        } else {
3389            pos.ins().uextend(I64, len)
3390        };
3391        let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64);
3392        let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64);
3393        let vmctx = self.vmctx_val(&mut pos);
3394        pos.ins().call(
3395            table_copy,
3396            &[
3397                vmctx,
3398                dst_table_index_arg,
3399                src_table_index_arg,
3400                dst,
3401                src,
3402                len,
3403            ],
3404        );
3405
3406        Ok(())
3407    }
3408
3409    pub fn translate_table_init(
3410        &mut self,
3411        builder: &mut FunctionBuilder<'_>,
3412        seg_index: u32,
3413        table_index: TableIndex,
3414        dst: ir::Value,
3415        src: ir::Value,
3416        len: ir::Value,
3417    ) -> WasmResult<()> {
3418        let mut pos = builder.cursor();
3419        let table_init = self.builtin_functions.table_init(&mut pos.func);
3420        let table_index_arg = pos.ins().iconst(I32, i64::from(table_index.as_u32()));
3421        let seg_index_arg = pos.ins().iconst(I32, i64::from(seg_index));
3422        let vmctx = self.vmctx_val(&mut pos);
3423        let index_type = self.table(table_index).idx_type;
3424        let dst = self.cast_index_to_i64(&mut pos, dst, index_type);
3425        let src = pos.ins().uextend(I64, src);
3426        let len = pos.ins().uextend(I64, len);
3427
3428        pos.ins().call(
3429            table_init,
3430            &[vmctx, table_index_arg, seg_index_arg, dst, src, len],
3431        );
3432
3433        Ok(())
3434    }
3435
3436    pub fn translate_elem_drop(&mut self, mut pos: FuncCursor, elem_index: u32) -> WasmResult<()> {
3437        let elem_drop = self.builtin_functions.elem_drop(&mut pos.func);
3438        let elem_index_arg = pos.ins().iconst(I32, elem_index as i64);
3439        let vmctx = self.vmctx_val(&mut pos);
3440        pos.ins().call(elem_drop, &[vmctx, elem_index_arg]);
3441        Ok(())
3442    }
3443
3444    pub fn translate_atomic_wait(
3445        &mut self,
3446        builder: &mut FunctionBuilder<'_>,
3447        memory_index: MemoryIndex,
3448        _heap: Heap,
3449        addr: ir::Value,
3450        expected: ir::Value,
3451        timeout: ir::Value,
3452    ) -> WasmResult<ir::Value> {
3453        #[cfg(feature = "threads")]
3454        {
3455            let mut pos = builder.cursor();
3456            let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
3457            let implied_ty = pos.func.dfg.value_type(expected);
3458            let wait_func = self.get_memory_atomic_wait(&mut pos.func, implied_ty);
3459
3460            let (memory_vmctx, defined_memory_index) =
3461                self.memory_vmctx_and_defined_index(&mut pos, memory_index);
3462
3463            let call_inst = pos.ins().call(
3464                wait_func,
3465                &[memory_vmctx, defined_memory_index, addr, expected, timeout],
3466            );
3467            let ret = pos.func.dfg.inst_results(call_inst)[0];
3468            Ok(builder.ins().ireduce(ir::types::I32, ret))
3469        }
3470        #[cfg(not(feature = "threads"))]
3471        {
3472            let _ = (builder, memory_index, addr, expected, timeout);
3473            Err(wasmtime_environ::WasmError::Unsupported(
3474                "threads support disabled at compile time".to_string(),
3475            ))
3476        }
3477    }
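
    // The wait builtin hands back the wasm-level result code (0 for "ok",
    // 1 for "not-equal", 2 for "timed-out" in the threads proposal) widened
    // to a pointer-sized value, hence the `ireduce` down to `i32` above.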
3478
3479    pub fn translate_atomic_notify(
3480        &mut self,
3481        builder: &mut FunctionBuilder<'_>,
3482        memory_index: MemoryIndex,
3483        _heap: Heap,
3484        addr: ir::Value,
3485        count: ir::Value,
3486    ) -> WasmResult<ir::Value> {
3487        #[cfg(feature = "threads")]
3488        {
3489            let mut pos = builder.cursor();
3490            let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
3491            let atomic_notify = self.builtin_functions.memory_atomic_notify(&mut pos.func);
3492
3493            let (memory_vmctx, defined_memory_index) =
3494                self.memory_vmctx_and_defined_index(&mut pos, memory_index);
3495            let call_inst = pos.ins().call(
3496                atomic_notify,
3497                &[memory_vmctx, defined_memory_index, addr, count],
3498            );
3499            let ret = pos.func.dfg.inst_results(call_inst)[0];
3500            Ok(builder.ins().ireduce(ir::types::I32, ret))
3501        }
3502        #[cfg(not(feature = "threads"))]
3503        {
3504            let _ = (builder, memory_index, addr, count);
3505            Err(wasmtime_environ::WasmError::Unsupported(
3506                "threads support disabled at compile time".to_string(),
3507            ))
3508        }
3509    }
3510
3511    pub fn translate_loop_header(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
3512        // If fuel consumption is enabled, check how much fuel we have
3513        // remaining to see whether we've run out by this point.
3514        if self.tunables.consume_fuel {
3515            self.fuel_check(builder);
3516        }
3517
3518        // If we are performing epoch-based interruption, check to see
3519        // if the epoch counter has changed.
3520        if self.tunables.epoch_interruption {
3521            self.epoch_check(builder);
3522        }
3523
3524        Ok(())
3525    }
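
    // Loop headers are, together with function entry, the natural place for
    // these checks: any non-terminating execution must repeatedly pass
    // through one of them, so checking here bounds how much work can happen
    // between consecutive fuel/epoch checks.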
3526
3527    pub fn before_translate_operator(
3528        &mut self,
3529        op: &Operator,
3530        _operand_types: Option<&[WasmValType]>,
3531        builder: &mut FunctionBuilder,
3532        state: &FuncTranslationStacks,
3533    ) -> WasmResult<()> {
3534        if self.tunables.consume_fuel {
3535            self.fuel_before_op(op, builder, state.reachable());
3536        }
3537        Ok(())
3538    }
3539
3540    pub fn after_translate_operator(
3541        &mut self,
3542        op: &Operator,
3543        _operand_types: Option<&[WasmValType]>,
3544        builder: &mut FunctionBuilder,
3545        state: &FuncTranslationStacks,
3546    ) -> WasmResult<()> {
3547        if self.tunables.consume_fuel && state.reachable() {
3548            self.fuel_after_op(op, builder);
3549        }
3550        Ok(())
3551    }
3552
3553    pub fn before_unconditionally_trapping_memory_access(&mut self, builder: &mut FunctionBuilder) {
3554        if self.tunables.consume_fuel {
3555            self.fuel_increment_var(builder);
3556            self.fuel_save_from_var(builder);
3557        }
3558    }
3559
3560    pub fn before_translate_function(
3561        &mut self,
3562        builder: &mut FunctionBuilder,
3563        _state: &FuncTranslationStacks,
3564    ) -> WasmResult<()> {
3565        // If an explicit stack limit is requested, emit a check against it
3566        // here at the start of the function.
3567        if let Some(gv) = self.stack_limit_at_function_entry {
3568            let limit = builder.ins().global_value(self.pointer_type(), gv);
3569            let sp = builder.ins().get_stack_pointer(self.pointer_type());
3570            let overflow = builder.ins().icmp(IntCC::UnsignedLessThan, sp, limit);
3571            self.conditionally_trap(builder, overflow, ir::TrapCode::STACK_OVERFLOW);
3572        }
3573
3574        // Additionally we initialize `fuel_var` if it will get used.
3575        if self.tunables.consume_fuel {
3576            self.fuel_function_entry(builder);
3577        }
3578
3579        // Initialize `epoch_var` with the current epoch.
3580        if self.tunables.epoch_interruption {
3581            self.epoch_function_entry(builder);
3582        }
3583
3584        #[cfg(feature = "wmemcheck")]
3585        if self.compiler.wmemcheck {
3586            let func_name = self.current_func_name(builder);
3587            if func_name == Some("malloc") {
3588                self.check_malloc_start(builder);
3589            } else if func_name == Some("free") {
3590                self.check_free_start(builder);
3591            }
3592        }
3593
3594        Ok(())
3595    }
3596
3597    pub fn after_translate_function(
3598        &mut self,
3599        builder: &mut FunctionBuilder,
3600        state: &FuncTranslationStacks,
3601    ) -> WasmResult<()> {
3602        if self.tunables.consume_fuel && state.reachable() {
3603            self.fuel_function_exit(builder);
3604        }
3605        Ok(())
3606    }
3607
3608    pub fn relaxed_simd_deterministic(&self) -> bool {
3609        self.tunables.relaxed_simd_deterministic
3610    }
3611
3612    pub fn has_native_fma(&self) -> bool {
3613        self.isa.has_native_fma()
3614    }
3615
3616    pub fn is_x86(&self) -> bool {
3617        self.isa.triple().architecture == target_lexicon::Architecture::X86_64
3618    }
3619
3620    pub fn translate_cont_bind(
3621        &mut self,
3622        builder: &mut FunctionBuilder<'_>,
3623        contobj: ir::Value,
3624        args: &[ir::Value],
3625    ) -> ir::Value {
3626        stack_switching::instructions::translate_cont_bind(self, builder, contobj, args)
3627    }
3628
3629    pub fn translate_cont_new(
3630        &mut self,
3631        builder: &mut FunctionBuilder<'_>,
3632        func: ir::Value,
3633        arg_types: &[WasmValType],
3634        return_types: &[WasmValType],
3635    ) -> WasmResult<ir::Value> {
3636        stack_switching::instructions::translate_cont_new(
3637            self,
3638            builder,
3639            func,
3640            arg_types,
3641            return_types,
3642        )
3643    }
3644
3645    pub fn translate_resume(
3646        &mut self,
3647        builder: &mut FunctionBuilder<'_>,
3648        type_index: u32,
3649        contobj: ir::Value,
3650        resume_args: &[ir::Value],
3651        resumetable: &[(u32, Option<ir::Block>)],
3652    ) -> WasmResult<Vec<ir::Value>> {
3653        stack_switching::instructions::translate_resume(
3654            self,
3655            builder,
3656            type_index,
3657            contobj,
3658            resume_args,
3659            resumetable,
3660        )
3661    }
3662
3663    pub fn translate_suspend(
3664        &mut self,
3665        builder: &mut FunctionBuilder<'_>,
3666        tag_index: u32,
3667        suspend_args: &[ir::Value],
3668        tag_return_types: &[ir::Type],
3669    ) -> Vec<ir::Value> {
3670        stack_switching::instructions::translate_suspend(
3671            self,
3672            builder,
3673            tag_index,
3674            suspend_args,
3675            tag_return_types,
3676        )
3677    }
3678
3679    /// Translates the stack-switching `switch` instruction.
3680    pub fn translate_switch(
3681        &mut self,
3682        builder: &mut FunctionBuilder,
3683        tag_index: u32,
3684        contobj: ir::Value,
3685        switch_args: &[ir::Value],
3686        return_types: &[ir::Type],
3687    ) -> WasmResult<Vec<ir::Value>> {
3688        stack_switching::instructions::translate_switch(
3689            self,
3690            builder,
3691            tag_index,
3692            contobj,
3693            switch_args,
3694            return_types,
3695        )
3696    }
3697
3698    pub fn continuation_arguments(&self, index: TypeIndex) -> &[WasmValType] {
3699        let idx = self.module.types[index].unwrap_module_type_index();
3700        self.types[self.types[idx]
3701            .unwrap_cont()
3702            .clone()
3703            .unwrap_module_type_index()]
3704        .unwrap_func()
3705        .params()
3706    }
3707
3708    pub fn continuation_returns(&self, index: TypeIndex) -> &[WasmValType] {
3709        let idx = self.module.types[index].unwrap_module_type_index();
3710        self.types[self.types[idx]
3711            .unwrap_cont()
3712            .clone()
3713            .unwrap_module_type_index()]
3714        .unwrap_func()
3715        .returns()
3716    }
3717
3718    pub fn tag_params(&self, tag_index: TagIndex) -> &[WasmValType] {
3719        let idx = self.module.tags[tag_index].signature;
3720        self.types[idx.unwrap_module_type_index()]
3721            .unwrap_func()
3722            .params()
3723    }
3724
3725    pub fn tag_returns(&self, tag_index: TagIndex) -> &[WasmValType] {
3726        let idx = self.module.tags[tag_index].signature;
3727        self.types[idx.unwrap_module_type_index()]
3728            .unwrap_func()
3729            .returns()
3730    }
3731
3732    pub fn use_x86_blendv_for_relaxed_laneselect(&self, ty: Type) -> bool {
3733        self.isa.has_x86_blendv_lowering(ty)
3734    }
3735
3736    pub fn use_x86_pmulhrsw_for_relaxed_q15mul(&self) -> bool {
3737        self.isa.has_x86_pmulhrsw_lowering()
3738    }
3739
3740    pub fn use_x86_pmaddubsw_for_dot(&self) -> bool {
3741        self.isa.has_x86_pmaddubsw_lowering()
3742    }
3743
3744    pub fn handle_before_return(&mut self, retvals: &[ir::Value], builder: &mut FunctionBuilder) {
3745        #[cfg(feature = "wmemcheck")]
3746        if self.compiler.wmemcheck {
3747            let func_name = self.current_func_name(builder);
3748            if func_name == Some("malloc") {
3749                self.hook_malloc_exit(builder, retvals);
3750            } else if func_name == Some("free") {
3751                self.hook_free_exit(builder);
3752            }
3753        }
3754        #[cfg(not(feature = "wmemcheck"))]
3755        let _ = (retvals, builder);
3756    }
3757
3758    pub fn before_load(
3759        &mut self,
3760        builder: &mut FunctionBuilder,
3761        val_size: u8,
3762        addr: ir::Value,
3763        offset: u64,
3764    ) {
3765        #[cfg(feature = "wmemcheck")]
3766        if self.compiler.wmemcheck {
3767            let check_load = self.builtin_functions.check_load(builder.func);
3768            let vmctx = self.vmctx_val(&mut builder.cursor());
3769            let num_bytes = builder.ins().iconst(I32, val_size as i64);
3770            let offset_val = builder.ins().iconst(I64, offset as i64);
3771            builder
3772                .ins()
3773                .call(check_load, &[vmctx, num_bytes, addr, offset_val]);
3774        }
3775        #[cfg(not(feature = "wmemcheck"))]
3776        let _ = (builder, val_size, addr, offset);
3777    }
3778
3779    pub fn before_store(
3780        &mut self,
3781        builder: &mut FunctionBuilder,
3782        val_size: u8,
3783        addr: ir::Value,
3784        offset: u64,
3785    ) {
3786        #[cfg(feature = "wmemcheck")]
3787        if self.compiler.wmemcheck {
3788            let check_store = self.builtin_functions.check_store(builder.func);
3789            let vmctx = self.vmctx_val(&mut builder.cursor());
3790            let num_bytes = builder.ins().iconst(I32, val_size as i64);
3791            let offset_val = builder.ins().iconst(I64, offset as i64);
3792            builder
3793                .ins()
3794                .call(check_store, &[vmctx, num_bytes, addr, offset_val]);
3795        }
3796        #[cfg(not(feature = "wmemcheck"))]
3797        let _ = (builder, val_size, addr, offset);
3798    }
3799
3800    pub fn update_global(
3801        &mut self,
3802        builder: &mut FunctionBuilder,
3803        global_index: GlobalIndex,
3804        value: ir::Value,
3805    ) {
3806        #[cfg(feature = "wmemcheck")]
3807        if self.compiler.wmemcheck {
3808            if global_index.index() == 0 {
3809                // We are making the assumption that global 0 is the auxiliary stack pointer.
3810                let update_stack_pointer =
3811                    self.builtin_functions.update_stack_pointer(builder.func);
3812                let vmctx = self.vmctx_val(&mut builder.cursor());
3813                builder.ins().call(update_stack_pointer, &[vmctx, value]);
3814            }
3815        }
3816        #[cfg(not(feature = "wmemcheck"))]
3817        let _ = (builder, global_index, value);
3818    }
3819
3820    pub fn before_memory_grow(
3821        &mut self,
3822        builder: &mut FunctionBuilder,
3823        num_pages: ir::Value,
3824        mem_index: MemoryIndex,
3825    ) {
3826        #[cfg(feature = "wmemcheck")]
3827        if self.compiler.wmemcheck && mem_index.as_u32() == 0 {
3828            let update_mem_size = self.builtin_functions.update_mem_size(builder.func);
3829            let vmctx = self.vmctx_val(&mut builder.cursor());
3830            builder.ins().call(update_mem_size, &[vmctx, num_pages]);
3831        }
3832        #[cfg(not(feature = "wmemcheck"))]
3833        let _ = (builder, num_pages, mem_index);
3834    }
3835
3836    /// If the ISA has rounding instructions, let Cranelift use them. But if
3837    /// not, lower to a libcall here, rather than having Cranelift do it. We
3838    /// can pass our libcall the vmctx pointer, which we use for stack
3839    /// overflow checking.
3840    ///
3841    /// This helper is generic over all the rounding instructions below, for
3842    /// both scalar and simd types. The `clif_round` argument is the
3843    /// CLIF-level rounding instruction to use if the ISA has it, and the
3844    /// `round_builtin` helper selects which element-level rounding builtin
3845    /// is called otherwise. Note that this handles the case when `value` is
3846    /// a vector by doing an element-wise libcall invocation.
3847    fn isa_round(
3848        &mut self,
3849        builder: &mut FunctionBuilder,
3850        value: ir::Value,
3851        clif_round: fn(FuncInstBuilder<'_, '_>, ir::Value) -> ir::Value,
3852        round_builtin: fn(&mut BuiltinFunctions, &mut Function) -> ir::FuncRef,
3853    ) -> ir::Value {
3854        if self.isa.has_round() {
3855            return clif_round(builder.ins(), value);
3856        }
3857
3858        let vmctx = self.vmctx_val(&mut builder.cursor());
3859        let round = round_builtin(&mut self.builtin_functions, builder.func);
3860        let round_one = |builder: &mut FunctionBuilder, value: ir::Value| {
3861            let call = builder.ins().call(round, &[vmctx, value]);
3862            *builder.func.dfg.inst_results(call).first().unwrap()
3863        };
3864
3865        let ty = builder.func.dfg.value_type(value);
3866        if !ty.is_vector() {
3867            return round_one(builder, value);
3868        }
3869
3870        assert_eq!(ty.bits(), 128);
3871        let zero = builder.func.dfg.constants.insert(V128Imm([0; 16]).into());
3872        let mut result = builder.ins().vconst(ty, zero);
3873        for i in 0..u8::try_from(ty.lane_count()).unwrap() {
3874            let element = builder.ins().extractlane(value, i);
3875            let element_rounded = round_one(builder, element);
3876            result = builder.ins().insertlane(result, element_rounded, i);
3877        }
3878        result
3879    }
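
    // For example, when the target lacks native rounding instructions, a
    // scalar `f32.ceil` becomes roughly the following (a sketch; value
    // numbers are illustrative and `fn0` is the imported `ceil_f32`
    // builtin):
    //
    //   v1 = ...                  ;; vmctx
    //   v2 = call fn0(v1, v0)
    //
    // while 128-bit vectors are rounded lane-by-lane via
    // `extractlane`/`insertlane` around the same call.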
3880
3881    pub fn ceil_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3882        self.isa_round(
3883            builder,
3884            value,
3885            |ins, val| ins.ceil(val),
3886            BuiltinFunctions::ceil_f32,
3887        )
3888    }
3889
3890    pub fn ceil_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3891        self.isa_round(
3892            builder,
3893            value,
3894            |ins, val| ins.ceil(val),
3895            BuiltinFunctions::ceil_f64,
3896        )
3897    }
3898
3899    pub fn ceil_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3900        self.isa_round(
3901            builder,
3902            value,
3903            |ins, val| ins.ceil(val),
3904            BuiltinFunctions::ceil_f32,
3905        )
3906    }
3907
3908    pub fn ceil_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3909        self.isa_round(
3910            builder,
3911            value,
3912            |ins, val| ins.ceil(val),
3913            BuiltinFunctions::ceil_f64,
3914        )
3915    }
3916
3917    pub fn floor_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3918        self.isa_round(
3919            builder,
3920            value,
3921            |ins, val| ins.floor(val),
3922            BuiltinFunctions::floor_f32,
3923        )
3924    }
3925
3926    pub fn floor_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3927        self.isa_round(
3928            builder,
3929            value,
3930            |ins, val| ins.floor(val),
3931            BuiltinFunctions::floor_f64,
3932        )
3933    }
3934
3935    pub fn floor_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3936        self.isa_round(
3937            builder,
3938            value,
3939            |ins, val| ins.floor(val),
3940            BuiltinFunctions::floor_f32,
3941        )
3942    }
3943
3944    pub fn floor_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3945        self.isa_round(
3946            builder,
3947            value,
3948            |ins, val| ins.floor(val),
3949            BuiltinFunctions::floor_f64,
3950        )
3951    }
3952
3953    pub fn trunc_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3954        self.isa_round(
3955            builder,
3956            value,
3957            |ins, val| ins.trunc(val),
3958            BuiltinFunctions::trunc_f32,
3959        )
3960    }
3961
3962    pub fn trunc_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3963        self.isa_round(
3964            builder,
3965            value,
3966            |ins, val| ins.trunc(val),
3967            BuiltinFunctions::trunc_f64,
3968        )
3969    }
3970
3971    pub fn trunc_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3972        self.isa_round(
3973            builder,
3974            value,
3975            |ins, val| ins.trunc(val),
3976            BuiltinFunctions::trunc_f32,
3977        )
3978    }
3979
3980    pub fn trunc_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3981        self.isa_round(
3982            builder,
3983            value,
3984            |ins, val| ins.trunc(val),
3985            BuiltinFunctions::trunc_f64,
3986        )
3987    }
3988
3989    pub fn nearest_f32(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3990        self.isa_round(
3991            builder,
3992            value,
3993            |ins, val| ins.nearest(val),
3994            BuiltinFunctions::nearest_f32,
3995        )
3996    }
3997
3998    pub fn nearest_f64(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
3999        self.isa_round(
4000            builder,
4001            value,
4002            |ins, val| ins.nearest(val),
4003            BuiltinFunctions::nearest_f64,
4004        )
4005    }
4006
4007    pub fn nearest_f32x4(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
4008        self.isa_round(
4009            builder,
4010            value,
4011            |ins, val| ins.nearest(val),
4012            BuiltinFunctions::nearest_f32,
4013        )
4014    }
4015
4016    pub fn nearest_f64x2(&mut self, builder: &mut FunctionBuilder, value: ir::Value) -> ir::Value {
4017        self.isa_round(
4018            builder,
4019            value,
4020            |ins, val| ins.nearest(val),
4021            BuiltinFunctions::nearest_f64,
4022        )
4023    }
4024
4025    pub fn swizzle(
4026        &mut self,
4027        builder: &mut FunctionBuilder,
4028        a: ir::Value,
4029        b: ir::Value,
4030    ) -> ir::Value {
4031        // On x86, swizzle would typically be compiled to `pshufb`, but that
4032        // instruction is not available on CPUs that lack SSSE3. In that case,
4033        // fall back to a builtin function.
4034        if !self.is_x86() || self.isa.has_x86_pshufb_lowering() {
4035            builder.ins().swizzle(a, b)
4036        } else {
4037            let swizzle = self.builtin_functions.i8x16_swizzle(builder.func);
4038            let vmctx = self.vmctx_val(&mut builder.cursor());
4039            let call = builder.ins().call(swizzle, &[vmctx, a, b]);
4040            *builder.func.dfg.inst_results(call).first().unwrap()
4041        }
4042    }
4043
    pub fn relaxed_swizzle(
        &mut self,
        builder: &mut FunctionBuilder,
        a: ir::Value,
        b: ir::Value,
    ) -> ir::Value {
        // As above, fall back to a builtin if we lack SSSE3.
        if !self.is_x86() || self.isa.has_x86_pshufb_lowering() {
            if !self.is_x86() || self.relaxed_simd_deterministic() {
                // Non-x86 targets, and deterministic semantics, use the
                // portable `swizzle`.
                builder.ins().swizzle(a, b)
            } else {
                // Otherwise x86 may use its native `pshufb` semantics, which
                // relaxed swizzle permits.
                builder.ins().x86_pshufb(a, b)
            }
        } else {
            let swizzle = self.builtin_functions.i8x16_swizzle(builder.func);
            let vmctx = self.vmctx_val(&mut builder.cursor());
            let call = builder.ins().call(swizzle, &[vmctx, a, b]);
            *builder.func.dfg.inst_results(call).first().unwrap()
        }
    }

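    /// Translates `i8x16.shuffle` with the compile-time lane indices `lanes`.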
    pub fn i8x16_shuffle(
        &mut self,
        builder: &mut FunctionBuilder,
        a: ir::Value,
        b: ir::Value,
        lanes: &[u8; 16],
    ) -> ir::Value {
        // As with swizzle, i8x16.shuffle would also commonly be implemented
        // with pshufb, so if we lack SSSE3, fall back to a builtin.
        if !self.is_x86() || self.isa.has_x86_pshufb_lowering() {
            let lanes = ConstantData::from(&lanes[..]);
            let mask = builder.func.dfg.immediates.push(lanes);
            builder.ins().shuffle(a, b, mask)
        } else {
            let lanes = builder
                .func
                .dfg
                .constants
                .insert(ConstantData::from(&lanes[..]));
            let lanes = builder.ins().vconst(I8X16, lanes);
            let i8x16_shuffle = self.builtin_functions.i8x16_shuffle(builder.func);
            let vmctx = self.vmctx_val(&mut builder.cursor());
            let call = builder.ins().call(i8x16_shuffle, &[vmctx, a, b, lanes]);
            *builder.func.dfg.inst_results(call).first().unwrap()
        }
    }

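    /// Lane-wise multiply-add for `f32x4`: a native `fma` when the ISA has
    /// one, the fused `fma_f32x4` builtin under deterministic relaxed-simd
    /// semantics, and an unfused multiply-then-add otherwise.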
    pub fn fma_f32x4(
        &mut self,
        builder: &mut FunctionBuilder,
        a: ir::Value,
        b: ir::Value,
        c: ir::Value,
    ) -> ir::Value {
        if self.has_native_fma() {
            builder.ins().fma(a, b, c)
        } else if self.relaxed_simd_deterministic() {
            // Deterministic semantics are "fused multiply and add".
            let fma = self.builtin_functions.fma_f32x4(builder.func);
            let vmctx = self.vmctx_val(&mut builder.cursor());
            let call = builder.ins().call(fma, &[vmctx, a, b, c]);
            *builder.func.dfg.inst_results(call).first().unwrap()
        } else {
            // Relaxed semantics permit an unfused multiply-then-add.
            let mul = builder.ins().fmul(a, b);
            builder.ins().fadd(mul, c)
        }
    }

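    /// Lane-wise multiply-add for `f64x2`: a native `fma` when the ISA has
    /// one, the fused `fma_f64x2` builtin under deterministic relaxed-simd
    /// semantics, and an unfused multiply-then-add otherwise.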
    pub fn fma_f64x2(
        &mut self,
        builder: &mut FunctionBuilder,
        a: ir::Value,
        b: ir::Value,
        c: ir::Value,
    ) -> ir::Value {
        if self.has_native_fma() {
            builder.ins().fma(a, b, c)
        } else if self.relaxed_simd_deterministic() {
            // Deterministic semantics are "fused multiply and add".
            let fma = self.builtin_functions.fma_f64x2(builder.func);
            let vmctx = self.vmctx_val(&mut builder.cursor());
            let call = builder.ins().call(fma, &[vmctx, a, b, c]);
            *builder.func.dfg.inst_results(call).first().unwrap()
        } else {
            // Relaxed semantics permit an unfused multiply-then-add.
            let mul = builder.ins().fmul(a, b);
            builder.ins().fadd(mul, c)
        }
    }

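    /// Returns the target ISA that this function is being compiled for.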
    pub fn isa(&self) -> &dyn TargetIsa {
        &*self.isa
    }

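    /// Emits an unconditional trap with code `trap`, either as a native trap
    /// instruction or, when native traps are disabled, as a call to the
    /// trap-raising libcall.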
    pub fn trap(&mut self, builder: &mut FunctionBuilder, trap: ir::TrapCode) {
        match (
            self.clif_instruction_traps_enabled(),
            crate::clif_trap_to_env_trap(trap),
        ) {
            // If native trap instructions are enabled, or there's no
            // wasmtime-defined trap code for this, then emit a native trap
            // instruction.
            (true, _) | (_, None) => {
                builder.ins().trap(trap);
            }
            // ... otherwise, with native traps disabled and a wasmtime-based
            // trap code available, invoke the libcall to raise a trap and
            // pass in our trap code. The libcall does not return, so leave a
            // debug `unreachable` in place afterwards as a defense-in-depth
            // measure.
            (false, Some(trap)) => {
                let libcall = self.builtin_functions.trap(&mut builder.func);
                let vmctx = self.vmctx_val(&mut builder.cursor());
                let trap_code = builder.ins().iconst(I8, i64::from(trap as u8));
                builder.ins().call(libcall, &[vmctx, trap_code]);
                builder.ins().trap(TRAP_INTERNAL_ASSERT);
            }
        }
    }

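    /// Traps with `trap` if `value` is zero, using either a native `trapz`
    /// or an explicit comparison plus conditional trap.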
    pub fn trapz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
        if self.clif_instruction_traps_enabled() {
            builder.ins().trapz(value, trap);
        } else {
            let ty = builder.func.dfg.value_type(value);
            let zero = builder.ins().iconst(ty, 0);
            let cmp = builder.ins().icmp(IntCC::Equal, value, zero);
            self.conditionally_trap(builder, cmp, trap);
        }
    }

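    /// Traps with `trap` if `value` is non-zero, using either a native
    /// `trapnz` or an explicit comparison plus conditional trap.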
    pub fn trapnz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
        if self.clif_instruction_traps_enabled() {
            builder.ins().trapnz(value, trap);
        } else {
            let ty = builder.func.dfg.value_type(value);
            let zero = builder.ins().iconst(ty, 0);
            let cmp = builder.ins().icmp(IntCC::NotEqual, value, zero);
            self.conditionally_trap(builder, cmp, trap);
        }
    }

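    /// Returns `lhs + rhs`, trapping with `trap` if the unsigned addition
    /// overflows.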
    pub fn uadd_overflow_trap(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
        trap: ir::TrapCode,
    ) -> ir::Value {
        if self.clif_instruction_traps_enabled() {
            builder.ins().uadd_overflow_trap(lhs, rhs, trap)
        } else {
            let (ret, overflow) = builder.ins().uadd_overflow(lhs, rhs);
            self.conditionally_trap(builder, overflow, trap);
            ret
        }
    }

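    /// Translates signed integer division, guarding against inputs that
    /// would trap before emitting `sdiv`.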
    pub fn translate_sdiv(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
    ) -> ir::Value {
        self.guard_signed_divide(builder, lhs, rhs);
        builder.ins().sdiv(lhs, rhs)
    }

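    /// Translates unsigned integer division, guarding against a zero divisor
    /// before emitting `udiv`.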
    pub fn translate_udiv(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
    ) -> ir::Value {
        self.guard_zero_divisor(builder, rhs);
        builder.ins().udiv(lhs, rhs)
    }

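    /// Translates signed integer remainder, guarding against a zero divisor
    /// before emitting `srem`.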
    pub fn translate_srem(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
    ) -> ir::Value {
        self.guard_zero_divisor(builder, rhs);
        builder.ins().srem(lhs, rhs)
    }

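    /// Translates unsigned integer remainder, guarding against a zero divisor
    /// before emitting `urem`.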
    pub fn translate_urem(
        &mut self,
        builder: &mut FunctionBuilder,
        lhs: ir::Value,
        rhs: ir::Value,
    ) -> ir::Value {
        self.guard_zero_divisor(builder, rhs);
        builder.ins().urem(lhs, rhs)
    }

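    /// Translates a checked float-to-signed-integer conversion
    /// (e.g. `i32.trunc_f64_s`).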
    pub fn translate_fcvt_to_sint(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: ir::Type,
        val: ir::Value,
    ) -> ir::Value {
        // When CLIF instruction traps are disabled, explicitly guard against
        // inputs that would trap (NaN and out-of-range values) before
        // emitting the conversion itself.
        if !self.clif_instruction_traps_enabled() {
            self.guard_fcvt_to_int(builder, ty, val, true);
        }
        builder.ins().fcvt_to_sint(ty, val)
    }

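    /// Translates a checked float-to-unsigned-integer conversion
    /// (e.g. `i32.trunc_f64_u`).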
    pub fn translate_fcvt_to_uint(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: ir::Type,
        val: ir::Value,
    ) -> ir::Value {
        // As above, guard explicitly when CLIF instruction traps are
        // disabled.
        if !self.clif_instruction_traps_enabled() {
            self.guard_fcvt_to_int(builder, ty, val, false);
        }
        builder.ins().fcvt_to_uint(ty, val)
    }

    /// Returns whether it's acceptable to rely on traps in CLIF memory-related
    /// instructions (e.g. loads and stores).
    ///
    /// This is enabled if `signals_based_traps` is `true` since signal handlers
    /// are available, but this is additionally forcibly disabled if Pulley is
    /// being targeted since the Pulley runtime doesn't catch segfaults for
    /// itself.
    pub fn clif_memory_traps_enabled(&self) -> bool {
        self.tunables.signals_based_traps && !self.is_pulley()
    }

    /// Returns whether it's acceptable to have CLIF instructions natively trap,
    /// such as division-by-zero.
    ///
    /// This is enabled if `signals_based_traps` is `true`, and additionally on
    /// Pulley unconditionally since Pulley doesn't use hardware-based traps in
    /// its runtime.
    pub fn clif_instruction_traps_enabled(&self) -> bool {
        self.tunables.signals_based_traps || self.is_pulley()
    }

    /// Returns whether a load from the null address may be used to signal
    /// a trap.
    pub fn load_from_zero_allowed(&self) -> bool {
        // Pulley allows loads from zero; otherwise this is only allowed with
        // CLIF memory traps plus Spectre mitigations.
        self.is_pulley()
            || (self.clif_memory_traps_enabled() && self.heap_access_spectre_mitigation())
    }

    /// Returns whether translation is happening for Pulley bytecode.
    pub fn is_pulley(&self) -> bool {
        self.isa.triple().is_pulley()
    }
}

// Helper function to convert an `IndexType` to an `ir::Type`.
//
// Implementing From/Into traits for `IndexType` or `ir::Type` would
// introduce an extra dependency between `wasmtime_types` and `cranelift_codegen`.
fn index_type_to_ir_type(index_type: IndexType) -> ir::Type {
    match index_type {
        IndexType::I32 => I32,
        IndexType::I64 => I64,
    }
}