wasmtime_internal_cranelift/func_environ/gc/enabled.rs

use super::{ArrayInit, GcCompiler};
use crate::bounds_checks::BoundsCheck;
use crate::func_environ::{Extension, FuncEnvironment};
use crate::translate::{Heap, HeapData, StructFieldsVec, TargetEnvironment};
use crate::{Reachability, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::ir::immediates::Offset32;
use cranelift_codegen::ir::{
    Block, BlockArg, ExceptionTableData, ExceptionTableItem, ExceptionTag,
};
use cranelift_codegen::{
    cursor::FuncCursor,
    ir::{self, InstBuilder, condcodes::IntCC},
};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_frontend::FunctionBuilder;
use smallvec::{SmallVec, smallvec};
use wasmtime_environ::{
    Collector, GcArrayLayout, GcLayout, GcStructLayout, I31_DISCRIMINANT, ModuleInternedTypeIndex,
    PtrSize, TagIndex, TypeIndex, VMGcKind, WasmCompositeInnerType, WasmHeapTopType, WasmHeapType,
    WasmRefType, WasmResult, WasmStorageType, WasmValType, wasm_unsupported,
};

#[cfg(feature = "gc-drc")]
mod drc;
#[cfg(feature = "gc-null")]
mod null;

/// Get the default GC compiler.
pub fn gc_compiler(func_env: &mut FuncEnvironment<'_>) -> WasmResult<Box<dyn GcCompiler>> {
    // Requiring a GC compiler is a reasonable over-approximation of this
    // function requiring a GC heap, so record that requirement here.
    func_env.needs_gc_heap = true;

    match func_env.tunables.collector {
        #[cfg(feature = "gc-drc")]
        Some(Collector::DeferredReferenceCounting) => Ok(Box::new(drc::DrcCompiler::default())),
        #[cfg(not(feature = "gc-drc"))]
        Some(Collector::DeferredReferenceCounting) => Err(wasm_unsupported!(
            "the DRC collector is unavailable because the `gc-drc` feature \
             was disabled at compile time",
        )),

        #[cfg(feature = "gc-null")]
        Some(Collector::Null) => Ok(Box::new(null::NullCompiler::default())),
        #[cfg(not(feature = "gc-null"))]
        Some(Collector::Null) => Err(wasm_unsupported!(
            "the null collector is unavailable because the `gc-null` feature \
             was disabled at compile time",
        )),

        #[cfg(any(feature = "gc-drc", feature = "gc-null"))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled at configuration time"
        )),
        #[cfg(not(any(feature = "gc-drc", feature = "gc-null")))]
        None => Err(wasm_unsupported!(
            "support for GC types disabled because no collector implementation \
             was selected at compile time; enable one of the `gc-drc` or \
             `gc-null` features",
        )),
    }
}

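/// Load a GC reference from `*ptr_to_gc_ref` without emitting any collector
/// read barrier, declaring the loaded value for stack maps when it may point
/// to a GC-managed object (i.e. when it is not an `i31`).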
#[cfg_attr(
    not(feature = "gc-drc"),
    expect(dead_code, reason = "easier to define")
)]
fn unbarriered_load_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    ptr_to_gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<ir::Value> {
    debug_assert!(ty.is_vmgcref_type());
    let gc_ref = builder.ins().load(ir::types::I32, flags, ptr_to_gc_ref, 0);
    if ty != WasmHeapType::I31 {
        builder.declare_value_needs_stack_map(gc_ref);
    }
    Ok(gc_ref)
}

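/// Store the GC reference `gc_ref` to `*dst` without emitting any collector
/// write barrier.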
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn unbarriered_store_gc_ref(
    builder: &mut FunctionBuilder,
    ty: WasmHeapType,
    dst: ir::Value,
    gc_ref: ir::Value,
    flags: ir::MemFlags,
) -> WasmResult<()> {
    debug_assert!(ty.is_vmgcref_type());
    builder.ins().store(flags, gc_ref, dst, 0);
    Ok(())
}

/// Emit code to read a struct field or array element from its raw address in
/// the GC heap.
///
/// The given address MUST have already been bounds-checked via
/// `prepare_gc_ref_access`.
fn read_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ty: WasmStorageType,
    addr: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    assert_eq!(extension.is_none(), matches!(ty, WasmStorageType::Val(_)));
    assert_eq!(
        extension.is_some(),
        matches!(ty, WasmStorageType::I8 | WasmStorageType::I16)
    );

    // Data inside GC objects is always little endian.
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    let value = match ty {
        WasmStorageType::I8 => builder.ins().load(ir::types::I8, flags, addr, 0),
        WasmStorageType::I16 => builder.ins().load(ir::types::I16, flags, addr, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => builder.ins().load(ir::types::I32, flags, addr, 0),
            WasmValType::I64 => builder.ins().load(ir::types::I64, flags, addr, 0),
            WasmValType::F32 => builder.ins().load(ir::types::F32, flags, addr, 0),
            WasmValType::F64 => builder.ins().load(ir::types::F64, flags, addr, 0),
            WasmValType::V128 => builder.ins().load(ir::types::I8X16, flags, addr, 0),
            WasmValType::Ref(r) => match r.heap_type.top() {
                WasmHeapTopType::Any | WasmHeapTopType::Extern | WasmHeapTopType::Exn => {
                    gc_compiler(func_env)?
                        .translate_read_gc_reference(func_env, builder, r, addr, flags)?
                }
                WasmHeapTopType::Func => {
                    let expected_ty = match r.heap_type {
                        WasmHeapType::Func => ModuleInternedTypeIndex::reserved_value(),
                        WasmHeapType::ConcreteFunc(ty) => ty.unwrap_module_type_index(),
                        WasmHeapType::NoFunc => {
                            let null = builder.ins().iconst(func_env.pointer_type(), 0);
                            if !r.nullable {
                                // Because `nofunc` is uninhabited, and this
                                // reference is non-null, this is unreachable
                                // code. Unconditionally trap via conditional
                                // trap instructions to avoid inserting block
                                // terminators in the middle of this block.
                                builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
                            }
                            return Ok(null);
                        }
                        _ => unreachable!("not a function heap type"),
                    };
                    let expected_ty = builder
                        .ins()
                        .iconst(ir::types::I32, i64::from(expected_ty.as_bits()));

                    let vmctx = func_env.vmctx_val(&mut builder.cursor());

                    let func_ref_id = builder.ins().load(ir::types::I32, flags, addr, 0);
                    let get_interned_func_ref = func_env
                        .builtin_functions
                        .get_interned_func_ref(builder.func);

                    let call_inst = builder
                        .ins()
                        .call(get_interned_func_ref, &[vmctx, func_ref_id, expected_ty]);
                    builder.func.dfg.first_result(call_inst)
                }
                WasmHeapTopType::Cont => {
                    // TODO(#10248) GC integration for stack switching
                    return Err(wasmtime_environ::WasmError::Unsupported(
                        "Stack switching feature not compatible with GC, yet".to_string(),
                    ));
                }
            },
        },
    };

    let value = match extension {
        Some(Extension::Sign) => builder.ins().sextend(ir::types::I32, value),
        Some(Extension::Zero) => builder.ins().uextend(ir::types::I32, value),
        None => value,
    };

    Ok(value)
}

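/// Emit code to write a function reference into a GC object's field: the raw
/// `funcref` pointer is first interned into the GC heap's function-reference
/// table via a libcall, and the resulting 32-bit id is what gets stored.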
fn write_func_ref_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    ref_type: WasmRefType,
    flags: ir::MemFlags,
    field_addr: ir::Value,
    func_ref: ir::Value,
) -> WasmResult<()> {
    assert_eq!(ref_type.heap_type.top(), WasmHeapTopType::Func);

    let vmctx = func_env.vmctx_val(&mut builder.cursor());

    let intern_func_ref_for_gc_heap = func_env
        .builtin_functions
        .intern_func_ref_for_gc_heap(builder.func);

    let func_ref = if ref_type.heap_type == WasmHeapType::NoFunc {
        let null = builder.ins().iconst(func_env.pointer_type(), 0);
        if !ref_type.nullable {
            // Because `nofunc` is uninhabited, and this reference is
            // non-null, this is unreachable code. Unconditionally trap
            // via conditional trap instructions to avoid inserting
            // block terminators in the middle of this block.
            builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
        }
        null
    } else {
        func_ref
    };

    // Convert the raw `funcref` into a `FuncRefTableId` for use in the
    // GC heap.
    let call_inst = builder
        .ins()
        .call(intern_func_ref_for_gc_heap, &[vmctx, func_ref]);
    let func_ref_id = builder.func.dfg.first_result(call_inst);
    let func_ref_id = builder.ins().ireduce(ir::types::I32, func_ref_id);

    // Store the id in the field.
    builder.ins().store(flags, func_ref_id, field_addr, 0);

    Ok(())
}

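/// Emit code to write a struct field or array element to its raw address in
/// the GC heap, dispatching to the collector's write barrier or to funcref
/// interning for reference-typed fields.
///
/// The given address MUST have already been bounds-checked via
/// `prepare_gc_ref_access`.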
fn write_field_at_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    field_ty: WasmStorageType,
    field_addr: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    // Data inside GC objects is always little endian.
    let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);

    match field_ty {
        WasmStorageType::I8 => {
            builder.ins().istore8(flags, new_val, field_addr, 0);
        }
        WasmStorageType::I16 => {
            builder.ins().istore16(flags, new_val, field_addr, 0);
        }
        WasmStorageType::Val(WasmValType::Ref(r)) if r.heap_type.top() == WasmHeapTopType::Func => {
            write_func_ref_at_addr(func_env, builder, r, flags, field_addr, new_val)?;
        }
        WasmStorageType::Val(WasmValType::Ref(r)) => {
            gc_compiler(func_env)?
                .translate_write_gc_reference(func_env, builder, r, field_addr, new_val, flags)?;
        }
        WasmStorageType::Val(_) => {
            assert_eq!(
                builder.func.dfg.value_type(new_val).bytes(),
                wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty)
            );
            builder.ins().store(flags, new_val, field_addr, 0);
        }
    }
    Ok(())
}

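/// Translate a `struct.new` instruction: allocate a new struct of the given
/// type and initialize it with the given field values.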
pub fn translate_struct_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    fields: &[ir::Value],
) -> WasmResult<ir::Value> {
    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, fields)
}

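/// Emit code to produce the default value for the given storage type: zero
/// for numeric and vector types, and a null reference for (necessarily
/// nullable) reference types.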
fn default_value(
    cursor: &mut FuncCursor,
    func_env: &FuncEnvironment<'_>,
    ty: &WasmStorageType,
) -> ir::Value {
    match ty {
        WasmStorageType::I8 | WasmStorageType::I16 => cursor.ins().iconst(ir::types::I32, 0),
        WasmStorageType::Val(v) => match v {
            WasmValType::I32 => cursor.ins().iconst(ir::types::I32, 0),
            WasmValType::I64 => cursor.ins().iconst(ir::types::I64, 0),
            WasmValType::F32 => cursor.ins().f32const(0.0),
            WasmValType::F64 => cursor.ins().f64const(0.0),
            WasmValType::V128 => {
                let c = cursor.func.dfg.constants.insert(vec![0; 16].into());
                cursor.ins().vconst(ir::types::I8X16, c)
            }
            WasmValType::Ref(r) => {
                assert!(r.nullable);
                let (ty, needs_stack_map) = func_env.reference_type(r.heap_type);

                // NB: The collector doesn't need to know about null references.
                let _ = needs_stack_map;

                cursor.ins().iconst(ty, 0)
            }
        },
    }
}

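/// Translate a `struct.new_default` instruction: allocate a new struct of the
/// given type with every field set to its default value.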
pub fn translate_struct_new_default(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
) -> WasmResult<ir::Value> {
    let interned_ty = func_env.module.types[struct_type_index].unwrap_module_type_index();
    let struct_ty = func_env.types.unwrap_struct(interned_ty)?;
    let fields = struct_ty
        .fields
        .iter()
        .map(|f| default_value(&mut builder.cursor(), func_env, &f.element_type))
        .collect::<StructFieldsVec>();
    gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
}

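/// Translate a `struct.get` (or `struct.get_s`/`struct.get_u`, depending on
/// `extension`) instruction: null-check the struct reference and load the
/// given field.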
pub fn translate_struct_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!(
        "translate_struct_get({struct_type_index:?}, {field_index:?}, {struct_ref:?}, {extension:?})"
    );

    // TODO: If we know we have a `(ref $my_struct)` here, instead of maybe a
    // `(ref null $my_struct)`, we could omit the `trapz`. But plumbing that
    // type info from `wasmparser` and through to here is a bit funky.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_or_exn_layout(interned_type_index);
    let struct_size = struct_layout.size;

    let field_offset = struct_layout.fields[field_index].offset;
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    assert!(field_offset + field_size <= struct_size);

    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        BoundsCheck::StaticObjectField {
            offset: field_offset,
            access_size: u8::try_from(field_size).unwrap(),
            object_size: struct_size,
        },
    );

    let result = read_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        extension,
    );
    log::trace!("translate_struct_get(..) -> {result:?}");
    result
}

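/// Translate a `struct.set` instruction: null-check the struct reference and
/// store `new_val` into the given field.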
pub fn translate_struct_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_type_index: TypeIndex,
    field_index: u32,
    struct_ref: ir::Value,
    new_val: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_struct_set({struct_type_index:?}, {field_index:?}, struct_ref: {struct_ref:?}, new_val: {new_val:?})"
    );

    // TODO: See comment in `translate_struct_get` about the `trapz`.
    func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);

    let field_index = usize::try_from(field_index).unwrap();
    let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();

    let struct_layout = func_env.struct_or_exn_layout(interned_type_index);
    let struct_size = struct_layout.size;

    let field_offset = struct_layout.fields[field_index].offset;
    let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
    let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
    assert!(field_offset + field_size <= struct_size);

    let field_addr = func_env.prepare_gc_ref_access(
        builder,
        struct_ref,
        BoundsCheck::StaticObjectField {
            offset: field_offset,
            access_size: u8::try_from(field_size).unwrap(),
            object_size: struct_size,
        },
    );

    write_field_at_addr(
        func_env,
        builder,
        field_ty.element_type,
        field_addr,
        new_val,
    )?;

    log::trace!("translate_struct_set: finished");
    Ok(())
}

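/// Emit code to extract an exception object's payload values, loading one
/// value per field of the tag's associated exception type.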
pub fn translate_exn_unbox(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    tag_index: TagIndex,
    exn_ref: ir::Value,
) -> WasmResult<SmallVec<[ir::Value; 4]>> {
    log::trace!("translate_exn_unbox({tag_index:?}, {exn_ref:?})");

    // We know that the `exn_ref` is not null because we reach this
    // operation only in catch blocks, and throws are initiated from
    // runtime code that checks for nulls first.

    // Get the GcExceptionLayout associated with this tag's
    // function type, and generate loads for each field.
    let exception_ty_idx = func_env
        .exception_type_from_tag(tag_index)
        .unwrap_module_type_index();
    let exception_ty = func_env.types.unwrap_exn(exception_ty_idx)?;
    let exn_layout = func_env.struct_or_exn_layout(exception_ty_idx);
    let exn_size = exn_layout.size;

    // Gather accesses first because these require a borrow on
    // `func_env`, which we later mutate below via
    // `prepare_gc_ref_access()`.
    let mut accesses: SmallVec<[_; 4]> = smallvec![];
    for (field_ty, field_layout) in exception_ty.fields.iter().zip(exn_layout.fields.iter()) {
        accesses.push((field_layout.offset, field_ty.element_type));
    }

    let mut result = smallvec![];
    for (field_offset, field_ty) in accesses {
        let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty);
        assert!(field_offset + field_size <= exn_size);
        let field_addr = func_env.prepare_gc_ref_access(
            builder,
            exn_ref,
            BoundsCheck::StaticObjectField {
                offset: field_offset,
                access_size: u8::try_from(field_size).unwrap(),
                object_size: exn_size,
            },
        );

        let value = read_field_at_addr(func_env, builder, field_ty, field_addr, None)?;
        result.push(value);
    }

    log::trace!("translate_exn_unbox(..) -> {result:?}");
    Ok(result)
}

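/// Translate a `throw` instruction: allocate a new exception object for the
/// given tag, initialized with `args`, and then throw it to the given
/// handlers.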
pub fn translate_exn_throw(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    tag_index: TagIndex,
    args: &[ir::Value],
    handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
) -> WasmResult<()> {
    let (instance_id, defined_tag_id) = func_env.get_instance_and_tag(builder, tag_index);
    let exnref = gc_compiler(func_env)?.alloc_exn(
        func_env,
        builder,
        tag_index,
        args,
        instance_id,
        defined_tag_id,
    )?;
    translate_exn_throw_ref(func_env, builder, exnref, handlers)
}

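/// Translate a `throw_ref` instruction: invoke the `throw_ref` libcall via
/// `try_call` so that any handlers in the current function body are attached
/// to the callsite.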
pub fn translate_exn_throw_ref(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    exnref: ir::Value,
    handlers: impl IntoIterator<Item = (Option<ExceptionTag>, Block)>,
) -> WasmResult<()> {
    let builtin = func_env.builtin_functions.throw_ref(builder.func);
    let sig = builder.func.dfg.ext_funcs[builtin].signature;
    let vmctx = func_env.vmctx_val(&mut builder.cursor());

    // Generate a `try_call` with handlers from the current
    // stack. This libcall is unique among libcall implementations of
    // opcodes: we know the others will not throw, but `throw_ref`'s
    // entire purpose is to throw. So if there are any handlers in the
    // local function body, we need to attach them to this callsite
    // like any other.
    let continuation = builder.create_block();
    let current_block = builder.current_block().unwrap();
    builder.insert_block_after(continuation, current_block);
    let continuation_call = builder.func.dfg.block_call(continuation, &[]);
    let mut table_items = vec![ExceptionTableItem::Context(vmctx)];
    for (tag, block) in handlers {
        let block_call = builder
            .func
            .dfg
            .block_call(block, &[BlockArg::TryCallExn(0)]);
        table_items.push(match tag {
            Some(tag) => ExceptionTableItem::Tag(tag, block_call),
            None => ExceptionTableItem::Default(block_call),
        });
    }
    let etd = ExceptionTableData::new(sig, continuation_call, table_items);
    let et = builder.func.dfg.exception_tables.push(etd);

    builder.ins().try_call(builtin, &[vmctx, exnref], et);

    builder.switch_to_block(continuation);
    builder.seal_block(continuation);
    func_env.trap(builder, crate::TRAP_UNREACHABLE);

    Ok(())
}

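/// Translate an `array.new` instruction: allocate a new array of the given
/// type filled with `len` copies of `elem`.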
pub fn translate_array_new(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elem: ir::Value,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new({array_type_index:?}, {elem:?}, {len:?})");
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Fill { elem, len },
    )?;
    log::trace!("translate_array_new(..) -> {result:?}");
    Ok(result)
}

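/// Translate an `array.new_default` instruction: allocate a new array of the
/// given type filled with `len` copies of the element type's default value.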
pub fn translate_array_new_default(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    len: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_default({array_type_index:?}, {len:?})");

    let interned_ty = func_env.module.types[array_type_index].unwrap_module_type_index();
    let array_ty = func_env.types.unwrap_array(interned_ty)?;
    let elem = default_value(&mut builder.cursor(), func_env, &array_ty.0.element_type);
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Fill { elem, len },
    )?;
    log::trace!("translate_array_new_default(..) -> {result:?}");
    Ok(result)
}

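/// Translate an `array.new_fixed` instruction: allocate a new array of the
/// given type initialized from the statically-known `elems`.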
pub fn translate_array_new_fixed(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    elems: &[ir::Value],
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_new_fixed({array_type_index:?}, {elems:?})");
    let result = gc_compiler(func_env)?.alloc_array(
        func_env,
        builder,
        array_type_index,
        ArrayInit::Elems(elems),
    )?;
    log::trace!("translate_array_new_fixed(..) -> {result:?}");
    Ok(result)
}

impl ArrayInit<'_> {
    /// Get the length (as an `i32`-typed `ir::Value`) of these array elements.
    #[cfg_attr(
        not(any(feature = "gc-drc", feature = "gc-null")),
        expect(dead_code, reason = "easier to define")
    )]
    fn len(self, pos: &mut FuncCursor) -> ir::Value {
        match self {
            ArrayInit::Fill { len, .. } => len,
            ArrayInit::Elems(e) => {
                let len = u32::try_from(e.len()).unwrap();
                pos.ins().iconst(ir::types::I32, i64::from(len))
            }
        }
    }

    /// Initialize a newly-allocated array's elements.
    #[cfg_attr(
        not(any(feature = "gc-drc", feature = "gc-null")),
        expect(dead_code, reason = "easier to define")
    )]
    fn initialize(
        self,
        func_env: &mut FuncEnvironment<'_>,
        builder: &mut FunctionBuilder<'_>,
        interned_type_index: ModuleInternedTypeIndex,
        base_size: u32,
        size: ir::Value,
        elems_addr: ir::Value,
        mut init_field: impl FnMut(
            &mut FuncEnvironment<'_>,
            &mut FunctionBuilder<'_>,
            WasmStorageType,
            ir::Value,
            ir::Value,
        ) -> WasmResult<()>,
    ) -> WasmResult<()> {
        log::trace!(
            "initialize_array({interned_type_index:?}, {base_size:?}, {size:?}, {elems_addr:?})"
        );

        assert!(!func_env.types[interned_type_index].composite_type.shared);
        let array_ty = func_env.types[interned_type_index]
            .composite_type
            .inner
            .unwrap_array();
        let elem_ty = array_ty.0.element_type;
        let elem_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&elem_ty);
        let pointer_type = func_env.pointer_type();
        let elem_size = builder.ins().iconst(pointer_type, i64::from(elem_size));
        match self {
            ArrayInit::Elems(elems) => {
                let mut elem_addr = elems_addr;
                for val in elems {
                    init_field(func_env, builder, elem_ty, elem_addr, *val)?;
                    elem_addr = builder.ins().iadd(elem_addr, elem_size);
                }
            }
            ArrayInit::Fill { elem, len: _ } => {
                // Compute the end address of the elements.
                let base_size = builder.ins().iconst(pointer_type, i64::from(base_size));
                let array_addr = builder.ins().isub(elems_addr, base_size);
                let size = uextend_i32_to_pointer_type(builder, pointer_type, size);
                let elems_end = builder.ins().iadd(array_addr, size);

                emit_array_fill_impl(
                    func_env,
                    builder,
                    elems_addr,
                    elem_size,
                    elems_end,
                    |func_env, builder, elem_addr| {
                        init_field(func_env, builder, elem_ty, elem_addr, elem)
                    },
                )?;
            }
        }
        log::trace!("initialize_array: finished");
        Ok(())
    }
}

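/// Emit a loop that invokes `emit_elem_write` once for each element address
/// in the half-open range `[elem_addr, fill_end)`, stepping by `elem_size`
/// bytes per iteration.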
fn emit_array_fill_impl(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    elem_addr: ir::Value,
    elem_size: ir::Value,
    fill_end: ir::Value,
    mut emit_elem_write: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    log::trace!(
        "emit_array_fill_impl(elem_addr: {elem_addr:?}, elem_size: {elem_size:?}, fill_end: {fill_end:?})"
    );

    let pointer_ty = func_env.pointer_type();

    assert_eq!(builder.func.dfg.value_type(elem_addr), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(elem_size), pointer_ty);
    assert_eq!(builder.func.dfg.value_type(fill_end), pointer_ty);

    // Loop to fill the elements, emitting the equivalent of the following
    // pseudo-CLIF:
    //
    // current_block:
    //     ...
    //     jump loop_header_block(elem_addr)
    //
    // loop_header_block(elem_addr: i32):
    //     done = icmp eq elem_addr, fill_end
    //     brif done, continue_block, loop_body_block
    //
    // loop_body_block:
    //     emit_elem_write()
    //     next_elem_addr = iadd elem_addr, elem_size
    //     jump loop_header_block(next_elem_addr)
    //
    // continue_block:
    //     ...

    let current_block = builder.current_block().unwrap();
    let loop_header_block = builder.create_block();
    let loop_body_block = builder.create_block();
    let continue_block = builder.create_block();

    builder.ensure_inserted_block();
    builder.insert_block_after(loop_header_block, current_block);
    builder.insert_block_after(loop_body_block, loop_header_block);
    builder.insert_block_after(continue_block, loop_body_block);

    // Current block: jump to the loop header block with the first element's
    // address.
    builder.ins().jump(loop_header_block, &[elem_addr.into()]);

    // Loop header block: check if we're done, then jump to either the continue
    // block or the loop body block.
    builder.switch_to_block(loop_header_block);
    builder.append_block_param(loop_header_block, pointer_ty);
    log::trace!("emit_array_fill_impl: loop header");
    func_env.translate_loop_header(builder)?;
    let elem_addr = builder.block_params(loop_header_block)[0];
    let done = builder.ins().icmp(IntCC::Equal, elem_addr, fill_end);
    builder
        .ins()
        .brif(done, continue_block, &[], loop_body_block, &[]);

    // Loop body block: write the value to the current element, compute the next
    // element's address, and then jump back to the loop header block.
    builder.switch_to_block(loop_body_block);
    log::trace!("emit_array_fill_impl: loop body");
    emit_elem_write(func_env, builder, elem_addr)?;
    let next_elem_addr = builder.ins().iadd(elem_addr, elem_size);
    builder
        .ins()
        .jump(loop_header_block, &[next_elem_addr.into()]);

    // Continue...
    builder.switch_to_block(continue_block);
    log::trace!("emit_array_fill_impl: finished");
    builder.seal_block(loop_header_block);
    builder.seal_block(loop_body_block);
    builder.seal_block(continue_block);
    Ok(())
}

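/// Translate an `array.fill` instruction: bounds-check the `index..index + n`
/// range against the array's length and then fill it with `value`.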
pub fn translate_array_fill(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
    n: ir::Value,
) -> WasmResult<()> {
    log::trace!(
        "translate_array_fill({array_type_index:?}, {array_ref:?}, {index:?}, {value:?}, {n:?})"
    );

    let len = translate_array_len(func_env, builder, array_ref)?;

    // Check that the full range of elements we want to fill is within bounds.
    let end_index = func_env.uadd_overflow_trap(builder, index, n, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
    let out_of_bounds = builder
        .ins()
        .icmp(IntCC::UnsignedGreaterThan, end_index, len);
    func_env.trapnz(builder, out_of_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    // Get the address of the first element we want to fill.
    let interned_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, interned_type_index, len);
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let obj_offset = builder.ins().iadd(base_size, offset_in_elems);
    let elem_addr = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        BoundsCheck::DynamicObjectField {
            offset: obj_offset,
            object_size: obj_size,
        },
    );

    // Calculate the end address, just after the filled region.
    let fill_size = builder.ins().imul(n, one_elem_size);
    let fill_size = uextend_i32_to_pointer_type(builder, func_env.pointer_type(), fill_size);
    let fill_end = builder.ins().iadd(elem_addr, fill_size);

    let one_elem_size =
        uextend_i32_to_pointer_type(builder, func_env.pointer_type(), one_elem_size);

    let result = emit_array_fill_impl(
        func_env,
        builder,
        elem_addr,
        one_elem_size,
        fill_end,
        |func_env, builder, elem_addr| {
            let elem_ty = func_env
                .types
                .unwrap_array(interned_type_index)?
                .0
                .element_type;
            write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)
        },
    );
    log::trace!("translate_array_fill(..) -> {result:?}");
    result
}

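/// Translate an `array.len` instruction: null-check the array reference and
/// load its length field.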
pub fn translate_array_len(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_ref: ir::Value,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_len({array_ref:?})");

    func_env.trapz(builder, array_ref, crate::TRAP_NULL_REFERENCE);

    let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
    let len_field = func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        // Note: We can't bounds check the whole array object's size because we
        // don't know its length yet. Chicken and egg problem.
        BoundsCheck::StaticOffset {
            offset: len_offset,
            access_size: u8::try_from(ir::types::I32.bytes()).unwrap(),
        },
    );
    let result = builder.ins().load(
        ir::types::I32,
        ir::MemFlags::trusted().with_readonly(),
        len_field,
        0,
    );
    log::trace!("translate_array_len(..) -> {result:?}");
    Ok(result)
}

struct ArraySizeInfo {
    /// The `i32` size of the whole array object, in bytes.
    obj_size: ir::Value,

    /// The `i32` size of each one of the array's elements, in bytes.
    one_elem_size: ir::Value,

    /// The `i32` size of the array's base object, in bytes. This is also the
    /// offset from the start of the array object to its elements.
    base_size: ir::Value,
}

/// Emit code to get the dynamic size (in bytes) of a whole array object, along
/// with some other related bits.
fn emit_array_size_info(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    // `i32` value containing the array's length.
    array_len: ir::Value,
) -> ArraySizeInfo {
    let array_layout = func_env.array_layout(array_type_index);

    // Note that we check for overflow below because we can't trust the array's
    // length: it came from inside the GC heap.
    //
    // We check for 32-bit multiplication overflow by performing a 64-bit
    // multiplication and testing the high bits.
    let one_elem_size = builder
        .ins()
        .iconst(ir::types::I64, i64::from(array_layout.elem_size));
    let array_len = builder.ins().uextend(ir::types::I64, array_len);
    let all_elems_size = builder.ins().imul(one_elem_size, array_len);

    let high_bits = builder.ins().ushr_imm(all_elems_size, 32);
    builder.ins().trapnz(high_bits, TRAP_INTERNAL_ASSERT);

    let all_elems_size = builder.ins().ireduce(ir::types::I32, all_elems_size);
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));
    let obj_size =
        builder
            .ins()
            .uadd_overflow_trap(all_elems_size, base_size, TRAP_INTERNAL_ASSERT);

    let one_elem_size = builder.ins().ireduce(ir::types::I32, one_elem_size);

    ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    }
}

/// Get the bounds-checked address of an element in an array.
///
/// The emitted code will trap if `index >= array.length`.
///
/// Returns the `ir::Value` containing the address of the `index`th element in
/// the array. You may read or write a value of the array's element type at this
/// address. You may not use it for any other kind of access, nor reuse this
/// value across GC safepoints.
fn array_elem_addr(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_type_index: ModuleInternedTypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
) -> ir::Value {
    // First, assert that `index < array.length`.
    //
    // This check is visible at the Wasm-semantics level.
    //
    // TODO: We should emit spectre-safe bounds checks for array accesses (if
    // configured) but we don't currently have a great way to do that here. The
    // proper solution is to use linear memories to back GC heaps and reuse the
    // code in `bounds_check.rs` to implement these bounds checks. That is all
    // planned, but not yet implemented.

    let len = translate_array_len(func_env, builder, array_ref).unwrap();

    let in_bounds = builder.ins().icmp(IntCC::UnsignedLessThan, index, len);
    func_env.trapz(builder, in_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);

    // Compute the size (in bytes) of the whole array object.
    let ArraySizeInfo {
        obj_size,
        one_elem_size,
        base_size,
    } = emit_array_size_info(func_env, builder, array_type_index, len);

    // Compute the offset of the `index`th element within the array object.
    //
    // NB: no need to check for overflow here, since at this point we know that
    // `len * elem_size + base_size` did not overflow and `i < len`.
    let offset_in_elems = builder.ins().imul(index, one_elem_size);
    let offset_in_array = builder.ins().iadd(offset_in_elems, base_size);

    // Finally, use the object size and element offset we just computed to
    // perform our implementation-internal bounds checks.
    //
    // Checking the whole object's size, rather than the `index`th element's
    // size, allows these bounds checks to be deduplicated across repeated
    // accesses to the same array at different indices.
    //
    // This check is not visible to Wasm and serves only to protect us from
    // our own implementation bugs. The goal is to keep any potential gadgets
    // confined within the GC heap, and turn what would otherwise be a security
    // vulnerability into a simple bug.
    //
    // TODO: Ideally we should fold the first Wasm-visible bounds check into
    // this internal bounds check, so that we aren't performing multiple,
    // redundant bounds checks. But we should figure out how to do this in a way
    // that doesn't defeat the object-size bounds checking's deduplication
    // mentioned above.
    func_env.prepare_gc_ref_access(
        builder,
        array_ref,
        BoundsCheck::DynamicObjectField {
            offset: offset_in_array,
            object_size: obj_size,
        },
    )
}

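/// Translate an `array.get` (or `array.get_s`/`array.get_u`, depending on
/// `extension`) instruction: bounds-check and load the `index`th element.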
pub fn translate_array_get(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    extension: Option<Extension>,
) -> WasmResult<ir::Value> {
    log::trace!("translate_array_get({array_type_index:?}, {array_ref:?}, {index:?})");

    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);

    let array_ty = func_env.types.unwrap_array(array_type_index)?;
    let elem_ty = array_ty.0.element_type;

    let result = read_field_at_addr(func_env, builder, elem_ty, elem_addr, extension)?;
    log::trace!("translate_array_get(..) -> {result:?}");
    Ok(result)
}

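/// Translate an `array.set` instruction: bounds-check and store `value` into
/// the `index`th element.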
pub fn translate_array_set(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder,
    array_type_index: TypeIndex,
    array_ref: ir::Value,
    index: ir::Value,
    value: ir::Value,
) -> WasmResult<()> {
    log::trace!("translate_array_set({array_type_index:?}, {array_ref:?}, {index:?}, {value:?})");

    let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
    let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);

    let array_ty = func_env.types.unwrap_array(array_type_index)?;
    let elem_ty = array_ty.0.element_type;

    write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)?;

    log::trace!("translate_array_set: finished");
    Ok(())
}

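/// Translate a `ref.test` instruction: determine whether `val` (a reference
/// of type `val_ty`) is a reference to a subtype of `test_ty`, producing an
/// `i32` boolean result.
///
/// After the special cases for bottom types, top types, and `i31`, the
/// general case emits control flow roughly like the following pseudo-CLIF (a
/// sketch, not the literal output):
///
/// ```text
/// current_block:
///     is_null = ...
///     brif is_null, continue_block(test_ty.nullable), non_null_block
///
/// non_null_block:
///     ;; `any` hierarchy only: branch to continue_block(result) if `val`
///     ;; is an `i31`.
///     ...
///     jump non_null_non_i31_block
///
/// non_null_non_i31_block:
///     result = ...check the object's `VMGcKind` or type index...
///     jump continue_block(result)
///
/// continue_block(result: i32):
///     ...
/// ```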
pub fn translate_ref_test(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    test_ty: WasmRefType,
    val: ir::Value,
    val_ty: WasmRefType,
) -> WasmResult<ir::Value> {
    log::trace!("translate_ref_test({test_ty:?}, {val:?})");

    // First special case: testing for references to bottom types.
    if test_ty.heap_type.is_bottom() {
        let result = if test_ty.nullable {
            // All null references (within the same type hierarchy) match null
            // references to the bottom type.
            func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?
        } else {
            // `ref.test` is always false for non-nullable bottom types, as the
            // bottom types are uninhabited.
            builder.ins().iconst(ir::types::I32, 0)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // And because `ref.test heap_ty` is only valid on operands whose type is in
    // the same type hierarchy as `heap_ty`, if `heap_ty` is its hierarchy's top
    // type, we only need to worry about whether we are testing for nullability
    // or not.
    if test_ty.heap_type.is_top() {
        let result = if test_ty.nullable {
            builder.ins().iconst(ir::types::I32, 1)
        } else {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
            let zero = builder.ins().iconst(ir::types::I32, 0);
            let one = builder.ins().iconst(ir::types::I32, 1);
            builder.ins().select(is_null, zero, one)
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // `i31ref`s are a little interesting because they don't point to GC
    // objects; we test the bit pattern of the reference itself.
    if test_ty.heap_type == WasmHeapType::I31 {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        let result = if test_ty.nullable {
            let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
            builder.ins().bor(is_null, is_i31)
        } else {
            is_i31
        };
        log::trace!("translate_ref_test(..) -> {result:?}");
        return Ok(result);
    }

    // Otherwise, in the general case, we need to inspect our given object's
    // actual type, which also requires null-checking and i31-checking it.

    let is_any_hierarchy = test_ty.heap_type.top() == WasmHeapTopType::Any;

    let non_null_block = builder.create_block();
    let non_null_non_i31_block = builder.create_block();
    let continue_block = builder.create_block();

    // Current block: check if the reference is null and branch appropriately.
    let is_null = func_env.translate_ref_is_null(builder.cursor(), val, val_ty)?;
    let result_when_is_null = builder
        .ins()
        .iconst(ir::types::I32, test_ty.nullable as i64);
    builder.ins().brif(
        is_null,
        continue_block,
        &[result_when_is_null.into()],
        non_null_block,
        &[],
    );

    // Non-null block: We know the GC ref is non-null, but we need to also check
    // for `i31` references that don't point to GC objects.
    builder.switch_to_block(non_null_block);
    log::trace!("translate_ref_test: non-null ref block");
    if is_any_hierarchy {
        let i31_mask = builder.ins().iconst(
            ir::types::I32,
            i64::from(wasmtime_environ::I31_DISCRIMINANT),
        );
        let is_i31 = builder.ins().band(val, i31_mask);
        // If it is an `i31`, then create the result value based on whether we
        // want `i31`s to pass the test or not.
        let result_when_is_i31 = builder.ins().iconst(
            ir::types::I32,
            matches!(
                test_ty.heap_type,
                WasmHeapType::Any | WasmHeapType::Eq | WasmHeapType::I31
            ) as i64,
        );
        builder.ins().brif(
            is_i31,
            continue_block,
            &[result_when_is_i31.into()],
            non_null_non_i31_block,
            &[],
        );
    } else {
        // If we aren't testing the `any` hierarchy, the reference cannot be an
        // `i31ref`. Jump directly to the non-null and non-i31 block; rely on
        // branch folding during lowering to clean this up.
        builder.ins().jump(non_null_non_i31_block, &[]);
    }

    // Non-null and non-i31 block: Read the actual `VMGcKind` or
    // `VMSharedTypeIndex` out of the object's header and check whether it
    // matches the expected type.
    builder.switch_to_block(non_null_non_i31_block);
    log::trace!("translate_ref_test: non-null and non-i31 ref block");
    let check_header_kind = |func_env: &mut FuncEnvironment<'_>,
                             builder: &mut FunctionBuilder,
                             val: ir::Value,
                             expected_kind: VMGcKind|
     -> ir::Value {
        let kind_addr = func_env.prepare_gc_ref_access(
            builder,
            val,
            BoundsCheck::StaticObjectField {
                offset: wasmtime_environ::VM_GC_HEADER_KIND_OFFSET,
                access_size: wasmtime_environ::VM_GC_KIND_SIZE,
                object_size: wasmtime_environ::VM_GC_HEADER_SIZE,
            },
        );
        let actual_kind = builder.ins().load(
            ir::types::I32,
            ir::MemFlags::trusted().with_readonly(),
            kind_addr,
            0,
        );
        let expected_kind = builder
            .ins()
            .iconst(ir::types::I32, i64::from(expected_kind.as_u32()));
        // Inline version of `VMGcKind::matches`.
        let and = builder.ins().band(actual_kind, expected_kind);
        let kind_matches = builder
            .ins()
            .icmp(ir::condcodes::IntCC::Equal, and, expected_kind);
        builder.ins().uextend(ir::types::I32, kind_matches)
    };
    let result = match test_ty.heap_type {
        WasmHeapType::Any
        | WasmHeapType::None
        | WasmHeapType::Extern
        | WasmHeapType::NoExtern
        | WasmHeapType::Func
        | WasmHeapType::NoFunc
        | WasmHeapType::Cont
        | WasmHeapType::NoCont
        | WasmHeapType::Exn
        | WasmHeapType::NoExn
        | WasmHeapType::I31 => unreachable!("handled top, bottom, and i31 types above"),

        // For these abstract but non-top and non-bottom types, we check the
        // `VMGcKind` that is in the object's header.
        WasmHeapType::Eq => check_header_kind(func_env, builder, val, VMGcKind::EqRef),
        WasmHeapType::Struct => check_header_kind(func_env, builder, val, VMGcKind::StructRef),
        WasmHeapType::Array => check_header_kind(func_env, builder, val, VMGcKind::ArrayRef),

        // For concrete types, we need to do a full subtype check between the
        // `VMSharedTypeIndex` in the object's header and the
        // `ModuleInternedTypeIndex` we have here.
        //
        // TODO: This check should ideally be done inline, but we don't have a
        // good way to access the `TypeRegistry`'s supertypes arrays from Wasm
        // code at the moment.
        WasmHeapType::ConcreteArray(ty)
        | WasmHeapType::ConcreteStruct(ty)
        | WasmHeapType::ConcreteExn(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let ty_addr = func_env.prepare_gc_ref_access(
                builder,
                val,
                BoundsCheck::StaticOffset {
                    offset: wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET,
                    access_size: func_env.offsets.size_of_vmshared_type_index(),
                },
            );
            let actual_shared_ty = builder.ins().load(
                ir::types::I32,
                ir::MemFlags::trusted().with_readonly(),
                ty_addr,
                0,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }

        // Same as for concrete arrays and structs except that a `VMFuncRef`
        // doesn't begin with a `VMGcHeader` and is a raw pointer rather than a
        // GC heap index.
        WasmHeapType::ConcreteFunc(ty) => {
            let expected_interned_ty = ty.unwrap_module_type_index();
            let expected_shared_ty =
                func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);

            let actual_shared_ty = func_env.load_funcref_type_index(
                &mut builder.cursor(),
                ir::MemFlags::trusted().with_readonly(),
                val,
            );

            func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
        }
        WasmHeapType::ConcreteCont(_) => {
            // TODO(#10248) GC integration for stack switching
            return Err(wasmtime_environ::WasmError::Unsupported(
                "Stack switching feature not compatible with GC, yet".to_string(),
            ));
        }
    };
    builder.ins().jump(continue_block, &[result.into()]);

    // Control flow join point with the result.
    builder.switch_to_block(continue_block);
    let result = builder.append_block_param(continue_block, ir::types::I32);
    log::trace!("translate_ref_test(..) -> {result:?}");

    builder.seal_block(non_null_block);
    builder.seal_block(non_null_non_i31_block);
    builder.seal_block(continue_block);

    Ok(result)
}

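/// Zero-extend an `i32` value to the target's pointer type (a no-op on 32-bit
/// targets).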
fn uextend_i32_to_pointer_type(
    builder: &mut FunctionBuilder,
    pointer_type: ir::Type,
    value: ir::Value,
) -> ir::Value {
    assert_eq!(builder.func.dfg.value_type(value), ir::types::I32);
    match pointer_type {
        ir::types::I32 => value,
        ir::types::I64 => builder.ins().uextend(ir::types::I64, value),
        _ => unreachable!(),
    }
}

/// Emit CLIF to compute an array object's total size, given its dynamic
/// length at initialization.
///
/// Traps if the size overflows.
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn emit_array_size(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    array_layout: &GcArrayLayout,
    len: ir::Value,
) -> ir::Value {
    let base_size = builder
        .ins()
        .iconst(ir::types::I32, i64::from(array_layout.base_size));

    // `elems_size = len * elem_size`
    //
    // Check for multiplication overflow and trap if it occurs, since that
    // means Wasm is attempting to allocate an array that is larger than our
    // implementation limits. (Note: there is no standard implementation
    // limit for array length beyond `u32::MAX`.)
    //
    // We implement this check by encoding our logically-32-bit operands as
    // i64 values, doing a 64-bit multiplication, and then checking the high
    // 32 bits of the multiplication's result. If the high 32 bits are not
    // all zeros, then the multiplication overflowed.
    debug_assert_eq!(builder.func.dfg.value_type(len), ir::types::I32);
    let len = builder.ins().uextend(ir::types::I64, len);
    let elems_size_64 = builder
        .ins()
        .imul_imm(len, i64::from(array_layout.elem_size));
    let high_bits = builder.ins().ushr_imm(elems_size_64, 32);
    func_env.trapnz(builder, high_bits, crate::TRAP_ALLOCATION_TOO_LARGE);
    let elems_size = builder.ins().ireduce(ir::types::I32, elems_size_64);

    // And if adding the base size and elements size overflows, then the
    // allocation is too large.
    let size = func_env.uadd_overflow_trap(
        builder,
        base_size,
        elems_size,
        crate::TRAP_ALLOCATION_TOO_LARGE,
    );

    size
}

/// Common helper for struct-field initialization that can be reused across
/// collectors.
#[cfg_attr(
    not(any(feature = "gc-drc", feature = "gc-null")),
    expect(dead_code, reason = "easier to define")
)]
fn initialize_struct_fields(
    func_env: &mut FuncEnvironment<'_>,
    builder: &mut FunctionBuilder<'_>,
    struct_ty: ModuleInternedTypeIndex,
    raw_ptr_to_struct: ir::Value,
    field_values: &[ir::Value],
    mut init_field: impl FnMut(
        &mut FuncEnvironment<'_>,
        &mut FunctionBuilder<'_>,
        WasmStorageType,
        ir::Value,
        ir::Value,
    ) -> WasmResult<()>,
) -> WasmResult<()> {
    let struct_layout = func_env.struct_or_exn_layout(struct_ty);
    let struct_size = struct_layout.size;
    let field_offsets: SmallVec<[_; 8]> = struct_layout.fields.iter().map(|f| f.offset).collect();
    assert_eq!(field_offsets.len(), field_values.len());

    assert!(!func_env.types[struct_ty].composite_type.shared);
    let fields = match &func_env.types[struct_ty].composite_type.inner {
        WasmCompositeInnerType::Struct(s) => &s.fields,
        WasmCompositeInnerType::Exn(e) => &e.fields,
        _ => panic!("Not a struct or exception type"),
    };

    let field_types: SmallVec<[_; 8]> = fields.iter().cloned().collect();
    assert_eq!(field_types.len(), field_values.len());

    for ((ty, val), offset) in field_types.into_iter().zip(field_values).zip(field_offsets) {
        let size_of_access = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&ty.element_type);
        assert!(offset + size_of_access <= struct_size);
        let field_addr = builder.ins().iadd_imm(raw_ptr_to_struct, i64::from(offset));
        init_field(func_env, builder, ty.element_type, field_addr, *val)?;
    }

    Ok(())
}

impl FuncEnvironment<'_> {
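    /// Get the `GcLayout` for the type at the given `type_index`, computing
    /// and caching it on first use.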
1364    fn gc_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcLayout {
1365        // Lazily compute and cache the layout.
1366        if !self.ty_to_gc_layout.contains_key(&type_index) {
1367            let ty = &self.types[type_index].composite_type;
1368            let layout = gc_compiler(self)
1369                .unwrap()
1370                .layouts()
1371                .gc_layout(ty)
1372                .expect("should only call `FuncEnvironment::gc_layout` for GC types");
1373            self.ty_to_gc_layout.insert(type_index, layout);
1374        }
1375
1376        self.ty_to_gc_layout.get(&type_index).unwrap()
1377    }
1378
1379    /// Get the `GcArrayLayout` for the array type at the given `type_index`.
1380    fn array_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcArrayLayout {
1381        self.gc_layout(type_index).unwrap_array()
1382    }
1383
1384    /// Get the `GcStructLayout` for the struct or exception type at the given `type_index`.
1385    fn struct_or_exn_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout {
1386        let result = self.gc_layout(type_index).unwrap_struct();
1387        result
1388    }
1389
1390    /// Get or create the global for our GC heap's base pointer.
1391    fn get_gc_heap_base_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
1392        if let Some(base) = self.gc_heap_base {
1393            return base;
1394        }
1395
1396        let store_context_ptr = self.get_vmstore_context_ptr_global(func);
1397        let offset = self.offsets.ptr.vmstore_context_gc_heap_base();
1398
1399        let mut flags = ir::MemFlags::trusted();
1400        if !self
1401            .tunables
1402            .gc_heap_memory_type()
1403            .memory_may_move(self.tunables)
1404        {
1405            flags.set_readonly();
1406            flags.set_can_move();
1407        }
1408
1409        let base = func.create_global_value(ir::GlobalValueData::Load {
1410            base: store_context_ptr,
1411            offset: Offset32::new(offset.into()),
1412            global_type: self.pointer_type(),
1413            flags,
1414        });
1415
1416        self.gc_heap_base = Some(base);
1417        base
1418    }
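
    // The global-value chain created by `get_gc_heap_base_global` lowers to
    // roughly the following CLIF (a sketch: `gv` numbers, the pointer type,
    // and OFFSET are illustrative, and `readonly can_move` is only set when
    // the GC heap can never move):
    //
    //     gv0 = ...                      ;; pointer to the `VMStoreContext`
    //     gv1 = load.i64 notrap aligned readonly can_move gv0+OFFSET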

    /// Get the GC heap's base.
    #[cfg(any(feature = "gc-null", feature = "gc-drc"))]
    fn get_gc_heap_base(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_gc_heap_base_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    /// Get or create the global for our GC heap's bound (its current length).
    fn get_gc_heap_bound_global(&mut self, func: &mut ir::Function) -> ir::GlobalValue {
        if let Some(bound) = self.gc_heap_bound {
            return bound;
        }
        let store_context_ptr = self.get_vmstore_context_ptr_global(func);
        let offset = self.offsets.ptr.vmstore_context_gc_heap_current_length();
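        // Unlike the base pointer, the bound is loaded with plain `trusted`
        // flags and never marked `readonly`: the GC heap's current length
        // changes when the heap grows, so this load must not be hoisted and
        // reused across potential growth points.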
        let bound = func.create_global_value(ir::GlobalValueData::Load {
            base: store_context_ptr,
            offset: Offset32::new(offset.into()),
            global_type: self.pointer_type(),
            flags: ir::MemFlags::trusted(),
        });
        self.gc_heap_bound = Some(bound);
        bound
    }

    /// Get the GC heap's bound.
    #[cfg(feature = "gc-null")]
    fn get_gc_heap_bound(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
        let global = self.get_gc_heap_bound_global(&mut builder.func);
        builder.ins().global_value(self.pointer_type(), global)
    }

    /// Get or create the `Heap` for our GC heap.
    fn get_gc_heap(&mut self, func: &mut ir::Function) -> Heap {
        if let Some(heap) = self.gc_heap {
            return heap;
        }

        let base = self.get_gc_heap_base_global(func);
        let bound = self.get_gc_heap_bound_global(func);
        let memory = self.tunables.gc_heap_memory_type();
        let heap = self.heaps.push(HeapData {
            base,
            bound,
            pcc_memory_type: None,
            memory,
        });
        self.gc_heap = Some(heap);
        heap
    }

    /// Get a raw pointer to `gc_ref[offset]`, bounds checked for the access
    /// described by `bounds_check`.
    ///
    /// The given `gc_ref` must be a non-null, non-i31 GC reference.
    ///
    /// If `bounds_check` is a `BoundsCheck::Object`, then it is the caller's
    /// responsibility to ensure that `offset + access_size <= object_size`.
    ///
    /// Returns a raw pointer to `gc_ref[offset]` -- not a raw pointer to the
    /// GC object itself (unless `offset` happens to be `0`). This raw
    /// pointer may be used to read or write up to as many bytes as described
    /// by `bounds_check`. Do NOT attempt to access bytes outside of that
    /// bound; doing so may lead to unchecked out-of-bounds accesses.
    ///
    /// This method is collector-agnostic.
    fn prepare_gc_ref_access(
        &mut self,
        builder: &mut FunctionBuilder,
        gc_ref: ir::Value,
        bounds_check: BoundsCheck,
    ) -> ir::Value {
        log::trace!("prepare_gc_ref_access({gc_ref:?}, {bounds_check:?})");
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);

        let gc_heap = self.get_gc_heap(&mut builder.func);
        let gc_heap = self.heaps[gc_heap].clone();
        let result = match crate::bounds_checks::bounds_check_and_compute_addr(
            builder,
            self,
            &gc_heap,
            gc_ref,
            bounds_check,
            crate::TRAP_INTERNAL_ASSERT,
        ) {
            Reachability::Reachable(v) => v,
            Reachability::Unreachable => {
                // We are now in unreachable code, but we don't want to plumb
                // `Reachability` through all of our callers, so assert that
                // this point is never reached and return a null pointer. The
                // `trapz` always fires here because `null` is the constant
                // zero.
                let null = builder.ins().iconst(self.pointer_type(), 0);
                builder.ins().trapz(null, crate::TRAP_INTERNAL_ASSERT);
                null
            }
        };
        log::trace!("prepare_gc_ref_access(..) -> {result:?}");
        result
    }
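
    // On the reachable path, the address computation above lowers to
    // roughly the following CLIF (a sketch; value numbers are illustrative
    // and the exact shape depends on the strategy that
    // `bounds_check_and_compute_addr` picks):
    //
    //     v1 = uextend.i64 v0        ;; zero-extend the 32-bit GC ref
    //     v2 = global_value.i64 gv1  ;; GC heap base pointer
    //     v3 = iadd v2, v1           ;; raw pointer to `gc_ref[0]`
    //     v4 = iadd_imm v3, OFFSET   ;; raw pointer to `gc_ref[offset]`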

    /// Emit checks (if necessary) for whether the given `gc_ref` is null or
    /// is an `i31ref`.
    ///
    /// Takes advantage of static information based on `ty` as to whether the
    /// GC reference is nullable or can ever be an `i31`.
    ///
    /// Returns an integer `ir::Value` that will be non-zero if the GC
    /// reference is null or is an `i31ref`, and zero otherwise.
    ///
    /// This method is collector-agnostic.
    #[cfg_attr(
        not(feature = "gc-drc"),
        expect(dead_code, reason = "easier to define")
    )]
    fn gc_ref_is_null_or_i31(
        &mut self,
        builder: &mut FunctionBuilder,
        ty: WasmRefType,
        gc_ref: ir::Value,
    ) -> ir::Value {
        assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
        assert!(ty.is_vmgcref_type_and_not_i31());

        let might_be_i31 = match ty.heap_type {
            // If we are definitely dealing with an i31, we shouldn't be
            // emitting dynamic checks for it, and the caller shouldn't call
            // this function. Should have been caught by the assertion at the
            // start of the function.
            WasmHeapType::I31 => unreachable!(),

            // Could potentially be an i31.
            WasmHeapType::Any | WasmHeapType::Eq => true,

            // If it is definitely a struct, array, or uninhabited type, then
            // it is definitely not an i31.
            WasmHeapType::Array
            | WasmHeapType::ConcreteArray(_)
            | WasmHeapType::Struct
            | WasmHeapType::ConcreteStruct(_)
            | WasmHeapType::None => false,

            // Despite being a different type hierarchy, this *could* be an
            // `i31` if it is the result of
            //
            //     (extern.convert_any (ref.i31 ...))
            WasmHeapType::Extern => true,

            // Can only ever be `null`.
            WasmHeapType::NoExtern => false,

            // Exception references are never `i31`s.
            WasmHeapType::Exn | WasmHeapType::ConcreteExn(_) | WasmHeapType::NoExn => false,

            // Wrong type hierarchy, and also funcrefs are not GC-managed
            // types. Should have been caught by the assertion at the start of
            // the function.
            WasmHeapType::Func | WasmHeapType::ConcreteFunc(_) | WasmHeapType::NoFunc => {
                unreachable!()
            }
            WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => {
                unreachable!()
            }
        };

        match (ty.nullable, might_be_i31) {
            // This GC reference statically cannot be null nor an i31. (Let
            // Cranelift's optimizer const-propagate this value and erase any
            // unnecessary control flow resulting from branching on this
            // value.)
            (false, false) => builder.ins().iconst(ir::types::I32, 0),

            // This GC reference is always non-null, but might be an i31.
            (false, true) => builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT)),

            // This GC reference might be null, but can never be an i31.
            (true, false) => builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0),

            // Fully general case: this GC reference could be either null or
            // an i31.
            (true, true) => {
                let is_i31 = builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT));
                let is_null = builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0);
                let is_null = builder.ins().uextend(ir::types::I32, is_null);
                builder.ins().bor(is_i31, is_null)
            }
        }
    }
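
    // For the fully general case above, the emitted CLIF is roughly (a
    // sketch with illustrative value numbers, assuming the i31 discriminant
    // is the low tag bit):
    //
    //     v1 = band_imm v0, 1      ;; non-zero iff the i31 tag bit is set
    //     v2 = icmp_imm eq v0, 0   ;; true iff the reference is null
    //     v3 = uextend.i32 v2
    //     v4 = bor v1, v3          ;; non-zero iff null or i31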

    /// Emit code to check whether `a <: b` for two `VMSharedTypeIndex`es.
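    ///
    /// The emitted control flow looks roughly like the following sketch
    /// (block and value names are illustrative):
    ///
    /// ```text
    /// current_block:
    ///     same_ty = uextend.i32 (icmp eq a, b)
    ///     brif same_ty, continue_block(same_ty), diff_tys_block
    ///
    /// diff_tys_block:
    ///     result = call is_subtype_libcall(vmctx, a, b)
    ///     jump continue_block(result)
    ///
    /// continue_block(result: i32):
    ///     ;; non-zero iff `a <: b`
    /// ```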
    pub(crate) fn is_subtype(
        &mut self,
        builder: &mut FunctionBuilder<'_>,
        a: ir::Value,
        b: ir::Value,
    ) -> ir::Value {
        log::trace!("is_subtype({a:?}, {b:?})");

        let diff_tys_block = builder.create_block();
        let continue_block = builder.create_block();

        // Current block: fast path for when `a == b`.
        log::trace!("is_subtype: fast path check for exact same types");
        let same_ty = builder.ins().icmp(IntCC::Equal, a, b);
        let same_ty = builder.ins().uextend(ir::types::I32, same_ty);
        builder.ins().brif(
            same_ty,
            continue_block,
            &[same_ty.into()],
            diff_tys_block,
            &[],
        );

        // Different types block: fall back to the `is_subtype` libcall.
        builder.switch_to_block(diff_tys_block);
        log::trace!("is_subtype: slow path to do full `is_subtype` libcall");
        let is_subtype = self.builtin_functions.is_subtype(builder.func);
        let vmctx = self.vmctx_val(&mut builder.cursor());
        let call_inst = builder.ins().call(is_subtype, &[vmctx, a, b]);
        let result = builder.func.dfg.first_result(call_inst);
        builder.ins().jump(continue_block, &[result.into()]);

        // Continue block: join point for the result.
        builder.switch_to_block(continue_block);
        let result = builder.append_block_param(continue_block, ir::types::I32);
        log::trace!("is_subtype(..) -> {result:?}");

        builder.seal_block(diff_tys_block);
        builder.seal_block(continue_block);

        result
    }
}