use super::GcCompiler;
use crate::func_environ::{Extension, FuncEnvironment};
use crate::gc::ArrayInit;
use crate::translate::{StructFieldsVec, TargetEnvironment};
use crate::TRAP_INTERNAL_ASSERT;
use cranelift_codegen::{
cursor::FuncCursor,
ir::{self, condcodes::IntCC, InstBuilder},
};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_frontend::FunctionBuilder;
use smallvec::SmallVec;
use wasmtime_environ::{
wasm_unsupported, Collector, GcArrayLayout, GcLayout, GcStructLayout, ModuleInternedTypeIndex,
PtrSize, TypeIndex, VMGcKind, WasmHeapTopType, WasmHeapType, WasmRefType, WasmResult,
WasmStorageType, WasmValType, I31_DISCRIMINANT,
};
#[cfg(feature = "gc-drc")]
mod drc;
#[cfg(feature = "gc-null")]
mod null;
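/// Get the GC compiler for this module's configured collector, or an error
/// if GC support (or the chosen collector's cargo feature) was disabled at
/// compile time.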
pub fn gc_compiler(func_env: &FuncEnvironment<'_>) -> WasmResult<Box<dyn GcCompiler>> {
match func_env.tunables.collector {
#[cfg(feature = "gc-drc")]
Some(Collector::DeferredReferenceCounting) => Ok(Box::new(drc::DrcCompiler::default())),
#[cfg(not(feature = "gc-drc"))]
Some(Collector::DeferredReferenceCounting) => Err(wasm_unsupported!(
"the DRC collector is unavailable because the `gc-drc` feature \
was disabled at compile time",
)),
#[cfg(feature = "gc-null")]
Some(Collector::Null) => Ok(Box::new(null::NullCompiler::default())),
#[cfg(not(feature = "gc-null"))]
Some(Collector::Null) => Err(wasm_unsupported!(
"the null collector is unavailable because the `gc-null` feature \
was disabled at compile time",
)),
#[cfg(any(feature = "gc-drc", feature = "gc-null"))]
None => Err(wasm_unsupported!(
"support for GC types disabled at configuration time"
)),
#[cfg(not(any(feature = "gc-drc", feature = "gc-null")))]
None => Err(wasm_unsupported!(
"support for GC types disabled because no collector implementation \
was selected at compile time; enable one of the `gc-drc` or \
`gc-null` features",
)),
}
}
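/// Load a GC reference from `ptr_to_gc_ref` without emitting any GC
/// barriers. The caller is responsible for ensuring that barriers are
/// unnecessary or are emitted elsewhere. The loaded reference is added to
/// stack maps unless it is an `i31`, which the collector does not manage.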
#[cfg_attr(not(feature = "gc-drc"), allow(dead_code))]
fn unbarriered_load_gc_ref(
builder: &mut FunctionBuilder,
ty: WasmHeapType,
ptr_to_gc_ref: ir::Value,
flags: ir::MemFlags,
) -> WasmResult<ir::Value> {
debug_assert!(ty.is_vmgcref_type());
let gc_ref = builder.ins().load(ir::types::I32, flags, ptr_to_gc_ref, 0);
if ty != WasmHeapType::I31 {
builder.declare_value_needs_stack_map(gc_ref);
}
Ok(gc_ref)
}
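/// Store a GC reference to `dst` without emitting any GC barriers. As with
/// the unbarriered load above, the caller is responsible for any barriers
/// the configured collector requires.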
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn unbarriered_store_gc_ref(
builder: &mut FunctionBuilder,
ty: WasmHeapType,
dst: ir::Value,
gc_ref: ir::Value,
flags: ir::MemFlags,
) -> WasmResult<()> {
debug_assert!(ty.is_vmgcref_type());
builder.ins().store(flags, gc_ref, dst, 0);
Ok(())
}
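/// Read a struct field or array element of storage type `ty` from `addr`.
/// Packed `i8`/`i16` storage is widened to `i32` with the given extension;
/// GC references go through the collector's read barrier; and function
/// references are re-materialized from their interned 32-bit IDs via a
/// libcall.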
fn read_field_at_addr(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
ty: WasmStorageType,
addr: ir::Value,
extension: Option<Extension>,
) -> WasmResult<ir::Value> {
assert_eq!(extension.is_none(), matches!(ty, WasmStorageType::Val(_)));
assert_eq!(
extension.is_some(),
matches!(ty, WasmStorageType::I8 | WasmStorageType::I16)
);
let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);
let value = match ty {
WasmStorageType::I8 => builder.ins().load(ir::types::I8, flags, addr, 0),
WasmStorageType::I16 => builder.ins().load(ir::types::I16, flags, addr, 0),
WasmStorageType::Val(v) => match v {
WasmValType::I32 => builder.ins().load(ir::types::I32, flags, addr, 0),
WasmValType::I64 => builder.ins().load(ir::types::I64, flags, addr, 0),
WasmValType::F32 => builder.ins().load(ir::types::F32, flags, addr, 0),
WasmValType::F64 => builder.ins().load(ir::types::F64, flags, addr, 0),
WasmValType::V128 => builder.ins().load(ir::types::I8X16, flags, addr, 0),
WasmValType::Ref(r) => match r.heap_type.top() {
WasmHeapTopType::Any | WasmHeapTopType::Extern => gc_compiler(func_env)?
.translate_read_gc_reference(func_env, builder, r, addr, flags)?,
WasmHeapTopType::Func => {
let expected_ty = match r.heap_type {
WasmHeapType::Func => ModuleInternedTypeIndex::reserved_value(),
WasmHeapType::ConcreteFunc(ty) => ty.unwrap_module_type_index(),
WasmHeapType::NoFunc => {
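// `(ref null nofunc)` is always null, and a non-nullable
// `(ref nofunc)` is uninhabited, so this load can never
// actually happen: the `trapz` below is on a constant zero
// and always traps.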
let null = builder.ins().iconst(func_env.pointer_type(), 0);
if !r.nullable {
builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
}
return Ok(null);
}
_ => unreachable!("not a function heap type"),
};
let expected_ty = builder
.ins()
.iconst(ir::types::I32, i64::from(expected_ty.as_bits()));
let vmctx = func_env.vmctx_val(&mut builder.cursor());
let func_ref_id = builder.ins().load(ir::types::I32, flags, addr, 0);
let get_interned_func_ref = func_env
.builtin_functions
.get_interned_func_ref(builder.func);
let call_inst = builder
.ins()
.call(get_interned_func_ref, &[vmctx, func_ref_id, expected_ty]);
builder.func.dfg.first_result(call_inst)
}
// Continuation references (from the stack-switching
// proposal) are not yet implemented.
WasmHeapTopType::Cont => todo!(),
},
},
};
let value = match extension {
Some(Extension::Sign) => builder.ins().sextend(ir::types::I32, value),
Some(Extension::Zero) => builder.ins().uextend(ir::types::I32, value),
None => value,
};
Ok(value)
}
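/// Write the function reference `func_ref` into the GC object field at
/// `field_addr`. Raw `VMFuncRef` pointers are never stored in the GC heap;
/// they are interned into a side table via a libcall and the resulting
/// 32-bit ID is stored instead.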
fn write_func_ref_at_addr(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
ref_type: WasmRefType,
flags: ir::MemFlags,
field_addr: ir::Value,
func_ref: ir::Value,
) -> WasmResult<()> {
assert_eq!(ref_type.heap_type.top(), WasmHeapTopType::Func);
let vmctx = func_env.vmctx_val(&mut builder.cursor());
let intern_func_ref_for_gc_heap = func_env
.builtin_functions
.intern_func_ref_for_gc_heap(builder.func);
let func_ref = if ref_type.heap_type == WasmHeapType::NoFunc {
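// `nofunc` references are always null. The uninhabited,
// non-nullable case can never be reached: the `trapz` is on a
// constant zero, so it always traps.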
let null = builder.ins().iconst(func_env.pointer_type(), 0);
if !ref_type.nullable {
builder.ins().trapz(null, TRAP_INTERNAL_ASSERT);
}
null
} else {
func_ref
};
let call_inst = builder
.ins()
.call(intern_func_ref_for_gc_heap, &[vmctx, func_ref]);
let func_ref_id = builder.func.dfg.first_result(call_inst);
let func_ref_id = builder.ins().ireduce(ir::types::I32, func_ref_id);
builder.ins().store(flags, func_ref_id, field_addr, 0);
Ok(())
}
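/// Write `new_val`, of storage type `field_ty`, to `field_addr`, routing GC
/// references through the collector's write barrier and function references
/// through interning.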
fn write_field_at_addr(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
field_ty: WasmStorageType,
field_addr: ir::Value,
new_val: ir::Value,
) -> WasmResult<()> {
let flags = ir::MemFlags::trusted().with_endianness(ir::Endianness::Little);
match field_ty {
WasmStorageType::I8 => {
builder.ins().istore8(flags, new_val, field_addr, 0);
}
WasmStorageType::I16 => {
builder.ins().istore16(flags, new_val, field_addr, 0);
}
WasmStorageType::Val(WasmValType::Ref(r)) if r.heap_type.top() == WasmHeapTopType::Func => {
write_func_ref_at_addr(func_env, builder, r, flags, field_addr, new_val)?;
}
WasmStorageType::Val(WasmValType::Ref(r)) => {
gc_compiler(func_env)?
.translate_write_gc_reference(func_env, builder, r, field_addr, new_val, flags)?;
}
WasmStorageType::Val(_) => {
assert_eq!(
builder.func.dfg.value_type(new_val).bytes(),
wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty)
);
builder.ins().store(flags, new_val, field_addr, 0);
}
}
Ok(())
}
pub fn translate_struct_new(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
struct_type_index: TypeIndex,
fields: &[ir::Value],
) -> WasmResult<ir::Value> {
gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, fields)
}
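/// Get the default (all-zeros) value of the given storage type, as used by
/// `struct.new_default` and `array.new_default`.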
fn default_value(
cursor: &mut FuncCursor,
func_env: &FuncEnvironment<'_>,
ty: &WasmStorageType,
) -> ir::Value {
match ty {
WasmStorageType::I8 | WasmStorageType::I16 => cursor.ins().iconst(ir::types::I32, 0),
WasmStorageType::Val(v) => match v {
WasmValType::I32 => cursor.ins().iconst(ir::types::I32, 0),
WasmValType::I64 => cursor.ins().iconst(ir::types::I64, 0),
WasmValType::F32 => cursor.ins().f32const(0.0),
WasmValType::F64 => cursor.ins().f64const(0.0),
WasmValType::V128 => {
let c = cursor.func.dfg.constants.insert(vec![0; 16].into());
cursor.ins().vconst(ir::types::I8X16, c)
}
WasmValType::Ref(r) => {
assert!(r.nullable);
let (ty, needs_stack_map) = func_env.reference_type(r.heap_type);
let _ = needs_stack_map;
cursor.ins().iconst(ty, 0)
}
},
}
}
pub fn translate_struct_new_default(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
struct_type_index: TypeIndex,
) -> WasmResult<ir::Value> {
let interned_ty = func_env.module.types[struct_type_index].unwrap_module_type_index();
let struct_ty = func_env.types.unwrap_struct(interned_ty)?;
let fields = struct_ty
.fields
.iter()
.map(|f| default_value(&mut builder.cursor(), func_env, &f.element_type))
.collect::<StructFieldsVec>();
gc_compiler(func_env)?.alloc_struct(func_env, builder, struct_type_index, &fields)
}
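/// Translate a `struct.get` (or `struct.get_s`/`struct.get_u`, when an
/// extension is given): null-check the struct reference, compute the
/// field's address with a bounds check against the whole struct, and read
/// the field.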
pub fn translate_struct_get(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
struct_type_index: TypeIndex,
field_index: u32,
struct_ref: ir::Value,
extension: Option<Extension>,
) -> WasmResult<ir::Value> {
log::trace!("translate_struct_get({struct_type_index:?}, {field_index:?}, {struct_ref:?}, {extension:?})");
func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);
let field_index = usize::try_from(field_index).unwrap();
let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();
let struct_layout = func_env.struct_layout(interned_type_index);
let struct_size = struct_layout.size;
let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));
let field_offset = struct_layout.fields[field_index];
let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
assert!(field_offset + field_size <= struct_size);
let field_addr = func_env.prepare_gc_ref_access(
builder,
struct_ref,
Offset::Static(field_offset),
BoundsCheck::Object(struct_size_val),
);
let result = read_field_at_addr(
func_env,
builder,
field_ty.element_type,
field_addr,
extension,
);
log::trace!("translate_struct_get(..) -> {result:?}");
result
}
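/// Translate a `struct.set`: null-check the struct reference, compute the
/// field's address, and write the new value through the appropriate
/// barrier.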
pub fn translate_struct_set(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
struct_type_index: TypeIndex,
field_index: u32,
struct_ref: ir::Value,
new_val: ir::Value,
) -> WasmResult<()> {
log::trace!(
"translate_struct_set({struct_type_index:?}, {field_index:?}, struct_ref: {struct_ref:?}, new_val: {new_val:?})"
);
func_env.trapz(builder, struct_ref, crate::TRAP_NULL_REFERENCE);
let field_index = usize::try_from(field_index).unwrap();
let interned_type_index = func_env.module.types[struct_type_index].unwrap_module_type_index();
let struct_layout = func_env.struct_layout(interned_type_index);
let struct_size = struct_layout.size;
let struct_size_val = builder.ins().iconst(ir::types::I32, i64::from(struct_size));
let field_offset = struct_layout.fields[field_index];
let field_ty = &func_env.types.unwrap_struct(interned_type_index)?.fields[field_index];
let field_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&field_ty.element_type);
assert!(field_offset + field_size <= struct_size);
let field_addr = func_env.prepare_gc_ref_access(
builder,
struct_ref,
Offset::Static(field_offset),
BoundsCheck::Object(struct_size_val),
);
write_field_at_addr(
func_env,
builder,
field_ty.element_type,
field_addr,
new_val,
)?;
log::trace!("translate_struct_set: finished");
Ok(())
}
pub fn translate_array_new(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
elem: ir::Value,
len: ir::Value,
) -> WasmResult<ir::Value> {
log::trace!("translate_array_new({array_type_index:?}, {elem:?}, {len:?})");
let result = gc_compiler(func_env)?.alloc_array(
func_env,
builder,
array_type_index,
ArrayInit::Fill { elem, len },
)?;
log::trace!("translate_array_new(..) -> {result:?}");
Ok(result)
}
pub fn translate_array_new_default(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
len: ir::Value,
) -> WasmResult<ir::Value> {
log::trace!("translate_array_new_default({array_type_index:?}, {len:?})");
let interned_ty = func_env.module.types[array_type_index].unwrap_module_type_index();
let array_ty = func_env.types.unwrap_array(interned_ty)?;
let elem = default_value(&mut builder.cursor(), func_env, &array_ty.0.element_type);
let result = gc_compiler(func_env)?.alloc_array(
func_env,
builder,
array_type_index,
ArrayInit::Fill { elem, len },
)?;
log::trace!("translate_array_new_default(..) -> {result:?}");
Ok(result)
}
pub fn translate_array_new_fixed(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
elems: &[ir::Value],
) -> WasmResult<ir::Value> {
log::trace!("translate_array_new_fixed({array_type_index:?}, {elems:?})");
let result = gc_compiler(func_env)?.alloc_array(
func_env,
builder,
array_type_index,
ArrayInit::Elems(elems),
)?;
log::trace!("translate_array_new_fixed(..) -> {result:?}");
Ok(result)
}
impl ArrayInit<'_> {
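/// Get the length of the array being initialized, as an `i32` IR value.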
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn len(self, pos: &mut FuncCursor) -> ir::Value {
match self {
ArrayInit::Fill { len, .. } => len,
ArrayInit::Elems(e) => {
let len = u32::try_from(e.len()).unwrap();
pos.ins().iconst(ir::types::I32, i64::from(len))
}
}
}
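/// Initialize an array's elements, either from an explicit list of values
/// (`array.new_fixed`) or by filling every element with one value
/// (`array.new` and friends). `elems_addr` is the address of the first
/// element and `size` is the array object's total size in bytes.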
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn initialize(
self,
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
interned_type_index: ModuleInternedTypeIndex,
base_size: u32,
size: ir::Value,
elems_addr: ir::Value,
mut init_field: impl FnMut(
&mut FuncEnvironment<'_>,
&mut FunctionBuilder<'_>,
WasmStorageType,
ir::Value,
ir::Value,
) -> WasmResult<()>,
) -> WasmResult<()> {
log::trace!(
"initialize_array({interned_type_index:?}, {base_size:?}, {size:?}, {elems_addr:?})"
);
assert!(!func_env.types[interned_type_index].composite_type.shared);
let array_ty = func_env.types[interned_type_index]
.composite_type
.inner
.unwrap_array();
let elem_ty = array_ty.0.element_type;
let elem_size = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&elem_ty);
let pointer_type = func_env.pointer_type();
let elem_size = builder.ins().iconst(pointer_type, i64::from(elem_size));
match self {
ArrayInit::Elems(elems) => {
let mut elem_addr = elems_addr;
for val in elems {
init_field(func_env, builder, elem_ty, elem_addr, *val)?;
elem_addr = builder.ins().iadd(elem_addr, elem_size);
}
}
ArrayInit::Fill { elem, len: _ } => {
let base_size = builder.ins().iconst(pointer_type, i64::from(base_size));
let array_addr = builder.ins().isub(elems_addr, base_size);
let size = uextend_i32_to_pointer_type(builder, pointer_type, size);
let elems_end = builder.ins().iadd(array_addr, size);
emit_array_fill_impl(
func_env,
builder,
elems_addr,
elem_size,
elems_end,
|func_env, builder, elem_addr| {
init_field(func_env, builder, elem_ty, elem_addr, elem)
},
)?;
}
}
log::trace!("initialize_array: finished");
Ok(())
}
}
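/// Emit a loop that writes elements one at a time, from `elem_addr`
/// (inclusive) up to `fill_end` (exclusive), bumping the address by
/// `elem_size` each iteration. `emit_elem_write` emits the write of a
/// single element.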
fn emit_array_fill_impl(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
elem_addr: ir::Value,
elem_size: ir::Value,
fill_end: ir::Value,
mut emit_elem_write: impl FnMut(
&mut FuncEnvironment<'_>,
&mut FunctionBuilder<'_>,
ir::Value,
) -> WasmResult<()>,
) -> WasmResult<()> {
log::trace!("emit_array_fill_impl(elem_addr: {elem_addr:?}, elem_size: {elem_size:?}, fill_end: {fill_end:?})");
let pointer_ty = func_env.pointer_type();
assert_eq!(builder.func.dfg.value_type(elem_addr), pointer_ty);
assert_eq!(builder.func.dfg.value_type(elem_size), pointer_ty);
assert_eq!(builder.func.dfg.value_type(fill_end), pointer_ty);
let current_block = builder.current_block().unwrap();
let loop_header_block = builder.create_block();
let loop_body_block = builder.create_block();
let continue_block = builder.create_block();
builder.ensure_inserted_block();
builder.insert_block_after(loop_header_block, current_block);
builder.insert_block_after(loop_body_block, loop_header_block);
builder.insert_block_after(continue_block, loop_body_block);
builder.ins().jump(loop_header_block, &[elem_addr]);
builder.switch_to_block(loop_header_block);
builder.append_block_param(loop_header_block, pointer_ty);
log::trace!("emit_array_fill_impl: loop header");
let elem_addr = builder.block_params(loop_header_block)[0];
let done = builder.ins().icmp(IntCC::Equal, elem_addr, fill_end);
builder
.ins()
.brif(done, continue_block, &[], loop_body_block, &[]);
builder.switch_to_block(loop_body_block);
log::trace!("emit_array_fill_impl: loop body");
emit_elem_write(func_env, builder, elem_addr)?;
let next_elem_addr = builder.ins().iadd(elem_addr, elem_size);
builder.ins().jump(loop_header_block, &[next_elem_addr]);
builder.switch_to_block(continue_block);
log::trace!("emit_array_fill_impl: finished");
builder.seal_block(loop_header_block);
builder.seal_block(loop_body_block);
builder.seal_block(continue_block);
Ok(())
}
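/// Translate an `array.fill` instruction: trap if the range
/// `[index, index + n)` is out of bounds, then fill it with `value` using
/// the loop above.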
pub fn translate_array_fill(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
array_type_index: TypeIndex,
array_ref: ir::Value,
index: ir::Value,
value: ir::Value,
n: ir::Value,
) -> WasmResult<()> {
log::trace!(
"translate_array_fill({array_type_index:?}, {array_ref:?}, {index:?}, {value:?}, {n:?})"
);
let len = translate_array_len(func_env, builder, array_ref)?;
let end_index = func_env.uadd_overflow_trap(builder, index, n, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
let out_of_bounds = builder
.ins()
.icmp(IntCC::UnsignedGreaterThan, end_index, len);
func_env.trapnz(builder, out_of_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
let interned_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
let ArraySizeInfo {
obj_size,
one_elem_size,
base_size,
} = emit_array_size_info(func_env, builder, interned_type_index, len);
let offset_in_elems = builder.ins().imul(index, one_elem_size);
let obj_offset = builder.ins().iadd(base_size, offset_in_elems);
let elem_addr = func_env.prepare_gc_ref_access(
builder,
array_ref,
Offset::Dynamic(obj_offset),
BoundsCheck::Object(obj_size),
);
// The fill region covers `n` elements, so it ends at
// `elem_addr + n * one_elem_size`. The multiplication cannot overflow:
// the bounds check above guarantees the filled range lies within the
// array, whose total size fits in 32 bits.
let fill_size = builder.ins().imul(n, one_elem_size);
let fill_size = uextend_i32_to_pointer_type(builder, func_env.pointer_type(), fill_size);
let fill_end = builder.ins().iadd(elem_addr, fill_size);
let one_elem_size =
uextend_i32_to_pointer_type(builder, func_env.pointer_type(), one_elem_size);
emit_array_fill_impl(
func_env,
builder,
elem_addr,
one_elem_size,
fill_end,
|func_env, builder, elem_addr| {
let elem_ty = func_env
.types
.unwrap_array(interned_type_index)?
.0
.element_type;
write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)
},
)?;
log::trace!("translate_array_fill(..) -> {result:?}");
Ok(result)
}
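/// Translate an `array.len` instruction: null-check the array reference and
/// load its 32-bit length field.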
pub fn translate_array_len(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
array_ref: ir::Value,
) -> WasmResult<ir::Value> {
log::trace!("translate_array_len({array_ref:?})");
func_env.trapz(builder, array_ref, crate::TRAP_NULL_REFERENCE);
let len_offset = gc_compiler(func_env)?.layouts().array_length_field_offset();
let len_field = func_env.prepare_gc_ref_access(
builder,
array_ref,
Offset::Static(len_offset),
BoundsCheck::Access(ir::types::I32.bytes()),
);
let result = builder
.ins()
.load(ir::types::I32, ir::MemFlags::trusted(), len_field, 0);
log::trace!("translate_array_len(..) -> {result:?}");
Ok(result)
}
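/// Size information for an array object, as `i32` IR values: the total
/// object size, the size of a single element, and the fixed base size that
/// precedes the elements.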
struct ArraySizeInfo {
obj_size: ir::Value,
one_elem_size: ir::Value,
base_size: ir::Value,
}
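/// Emit code to compute an array object's size from its length. Overflow
/// traps with an internal assertion, since the size was already validated
/// when the array was allocated.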
fn emit_array_size_info(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
array_type_index: ModuleInternedTypeIndex,
array_len: ir::Value,
) -> ArraySizeInfo {
let array_layout = func_env.array_layout(array_type_index);
let one_elem_size = builder
.ins()
.iconst(ir::types::I64, i64::from(array_layout.elem_size));
let array_len = builder.ins().uextend(ir::types::I64, array_len);
let all_elems_size = builder.ins().imul(one_elem_size, array_len);
let high_bits = builder.ins().ushr_imm(all_elems_size, 32);
builder.ins().trapnz(high_bits, TRAP_INTERNAL_ASSERT);
let all_elems_size = builder.ins().ireduce(ir::types::I32, all_elems_size);
let base_size = builder
.ins()
.iconst(ir::types::I32, i64::from(array_layout.base_size));
let obj_size =
builder
.ins()
.uadd_overflow_trap(all_elems_size, base_size, TRAP_INTERNAL_ASSERT);
let one_elem_size = builder.ins().ireduce(ir::types::I32, one_elem_size);
ArraySizeInfo {
obj_size,
one_elem_size,
base_size,
}
}
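/// Compute the address of the `index`th element of an array, emitting the
/// user-visible `index < len` bounds check as well as the internal GC-heap
/// access check.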
fn array_elem_addr(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
array_type_index: ModuleInternedTypeIndex,
array_ref: ir::Value,
index: ir::Value,
) -> ir::Value {
let len = translate_array_len(func_env, builder, array_ref).unwrap();
let in_bounds = builder.ins().icmp(IntCC::UnsignedLessThan, index, len);
func_env.trapz(builder, in_bounds, crate::TRAP_ARRAY_OUT_OF_BOUNDS);
let ArraySizeInfo {
obj_size,
one_elem_size,
base_size,
} = emit_array_size_info(func_env, builder, array_type_index, len);
let offset_in_elems = builder.ins().imul(index, one_elem_size);
let offset_in_array = builder.ins().iadd(offset_in_elems, base_size);
func_env.prepare_gc_ref_access(
builder,
array_ref,
Offset::Dynamic(offset_in_array),
BoundsCheck::Object(obj_size),
)
}
pub fn translate_array_get(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
array_ref: ir::Value,
index: ir::Value,
extension: Option<Extension>,
) -> WasmResult<ir::Value> {
log::trace!("translate_array_get({array_type_index:?}, {array_ref:?}, {index:?})");
let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);
let array_ty = func_env.types.unwrap_array(array_type_index)?;
let elem_ty = array_ty.0.element_type;
let result = read_field_at_addr(func_env, builder, elem_ty, elem_addr, extension)?;
log::trace!("translate_array_get(..) -> {result:?}");
Ok(result)
}
pub fn translate_array_set(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
array_ref: ir::Value,
index: ir::Value,
value: ir::Value,
) -> WasmResult<()> {
log::trace!("translate_array_set({array_type_index:?}, {array_ref:?}, {index:?}, {value:?})");
let array_type_index = func_env.module.types[array_type_index].unwrap_module_type_index();
let elem_addr = array_elem_addr(func_env, builder, array_type_index, array_ref, index);
let array_ty = func_env.types.unwrap_array(array_type_index)?;
let elem_ty = array_ty.0.element_type;
write_field_at_addr(func_env, builder, elem_ty, elem_addr, value)?;
log::trace!("translate_array_set: finished");
Ok(())
}
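/// Translate a `ref.test` instruction. Top, bottom, and `i31` heap types
/// are handled with inline fast paths; other types require inspecting the
/// GC object's header kind or concrete type index.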
pub fn translate_ref_test(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
ref_ty: WasmRefType,
val: ir::Value,
) -> WasmResult<ir::Value> {
log::trace!("translate_ref_test({ref_ty:?}, {val:?})");
if ref_ty.heap_type.is_bottom() {
let result = if ref_ty.nullable {
func_env.translate_ref_is_null(builder.cursor(), val)?
} else {
builder.ins().iconst(ir::types::I32, 0)
};
log::trace!("translate_ref_test(..) -> {result:?}");
return Ok(result);
}
if ref_ty.heap_type.is_top() {
let result = if ref_ty.nullable {
builder.ins().iconst(ir::types::I32, 1)
} else {
let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
let zero = builder.ins().iconst(ir::types::I32, 0);
let one = builder.ins().iconst(ir::types::I32, 1);
builder.ins().select(is_null, zero, one)
};
log::trace!("translate_ref_test(..) -> {result:?}");
return Ok(result);
}
if ref_ty.heap_type == WasmHeapType::I31 {
let i31_mask = builder.ins().iconst(
ir::types::I32,
i64::from(wasmtime_environ::I31_DISCRIMINANT),
);
let is_i31 = builder.ins().band(val, i31_mask);
let result = if ref_ty.nullable {
let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
builder.ins().bor(is_null, is_i31)
} else {
is_i31
};
log::trace!("translate_ref_test(..) -> {result:?}");
return Ok(result);
}
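// Otherwise we need runtime checks: first for null, then (within the
// `any` hierarchy) for `i31`, and finally an inspection of the GC
// object's header.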
let is_any_hierarchy = ref_ty.heap_type.top() == WasmHeapTopType::Any;
let non_null_block = builder.create_block();
let non_null_non_i31_block = builder.create_block();
let continue_block = builder.create_block();
let is_null = func_env.translate_ref_is_null(builder.cursor(), val)?;
let result_when_is_null = builder.ins().iconst(ir::types::I32, ref_ty.nullable as i64);
builder.ins().brif(
is_null,
continue_block,
&[result_when_is_null],
non_null_block,
&[],
);
builder.switch_to_block(non_null_block);
log::trace!("translate_ref_test: non-null ref block");
if is_any_hierarchy {
let i31_mask = builder.ins().iconst(
ir::types::I32,
i64::from(wasmtime_environ::I31_DISCRIMINANT),
);
let is_i31 = builder.ins().band(val, i31_mask);
let result_when_is_i31 = builder.ins().iconst(
ir::types::I32,
matches!(
ref_ty.heap_type,
WasmHeapType::Any | WasmHeapType::Eq | WasmHeapType::I31
) as i64,
);
builder.ins().brif(
is_i31,
continue_block,
&[result_when_is_i31],
non_null_non_i31_block,
&[],
);
} else {
builder.ins().jump(non_null_non_i31_block, &[]);
}
builder.switch_to_block(non_null_non_i31_block);
log::trace!("translate_ref_test: non-null and non-i31 ref block");
let check_header_kind = |func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder,
val: ir::Value,
expected_kind: VMGcKind|
-> ir::Value {
let header_size = builder.ins().iconst(
ir::types::I32,
i64::from(wasmtime_environ::VM_GC_HEADER_SIZE),
);
let kind_addr = func_env.prepare_gc_ref_access(
builder,
val,
Offset::Static(wasmtime_environ::VM_GC_HEADER_KIND_OFFSET),
BoundsCheck::Object(header_size),
);
let actual_kind = builder.ins().load(
ir::types::I32,
ir::MemFlags::trusted().with_readonly(),
kind_addr,
0,
);
let expected_kind = builder
.ins()
.iconst(ir::types::I32, i64::from(expected_kind.as_u32()));
let and = builder.ins().band(actual_kind, expected_kind);
let kind_matches = builder
.ins()
.icmp(ir::condcodes::IntCC::Equal, and, expected_kind);
builder.ins().uextend(ir::types::I32, kind_matches)
};
let result = match ref_ty.heap_type {
WasmHeapType::Any
| WasmHeapType::None
| WasmHeapType::Extern
| WasmHeapType::NoExtern
| WasmHeapType::Func
| WasmHeapType::NoFunc
| WasmHeapType::I31 => unreachable!("handled top, bottom, and i31 types above"),
WasmHeapType::Eq => check_header_kind(func_env, builder, val, VMGcKind::EqRef),
WasmHeapType::Struct => check_header_kind(func_env, builder, val, VMGcKind::StructRef),
WasmHeapType::Array => check_header_kind(func_env, builder, val, VMGcKind::ArrayRef),
WasmHeapType::ConcreteArray(ty) | WasmHeapType::ConcreteStruct(ty) => {
let expected_interned_ty = ty.unwrap_module_type_index();
let expected_shared_ty =
func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);
let ty_addr = func_env.prepare_gc_ref_access(
builder,
val,
Offset::Static(wasmtime_environ::VM_GC_HEADER_TYPE_INDEX_OFFSET),
BoundsCheck::Access(func_env.offsets.size_of_vmshared_type_index().into()),
);
let actual_shared_ty = builder.ins().load(
ir::types::I32,
ir::MemFlags::trusted().with_readonly(),
ty_addr,
0,
);
func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
}
WasmHeapType::ConcreteFunc(ty) => {
let expected_interned_ty = ty.unwrap_module_type_index();
let expected_shared_ty =
func_env.module_interned_to_shared_ty(&mut builder.cursor(), expected_interned_ty);
let actual_shared_ty = func_env.load_funcref_type_index(
&mut builder.cursor(),
ir::MemFlags::trusted().with_readonly(),
val,
);
func_env.is_subtype(builder, actual_shared_ty, expected_shared_ty)
}
// Continuation types are not yet implemented.
WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(),
};
builder.ins().jump(continue_block, &[result]);
builder.switch_to_block(continue_block);
let result = builder.append_block_param(continue_block, ir::types::I32);
log::trace!("translate_ref_test(..) -> {result:?}");
builder.seal_block(non_null_block);
builder.seal_block(non_null_non_i31_block);
builder.seal_block(continue_block);
Ok(result)
}
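/// The offset at which a GC object access will occur.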
#[derive(Debug)]
enum Offset {
Static(u32),
Dynamic(ir::Value),
}
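/// The kind of bounds check to emit when accessing a GC object: either
/// that the whole object of the given size is in bounds, or just the
/// accessed range of the given byte width.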
#[derive(Debug)]
enum BoundsCheck {
Object(ir::Value),
Access(u32),
}
fn uextend_i32_to_pointer_type(
builder: &mut FunctionBuilder,
pointer_type: ir::Type,
value: ir::Value,
) -> ir::Value {
assert_eq!(builder.func.dfg.value_type(value), ir::types::I32);
match pointer_type {
ir::types::I32 => value,
ir::types::I64 => builder.ins().uextend(ir::types::I64, value),
_ => unreachable!(),
}
}
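/// Emit code to compute the total size, in bytes, of the array being
/// allocated, trapping with `TRAP_ALLOCATION_TOO_LARGE` if the size does
/// not fit in a `u32`.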
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn emit_array_size(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
array_layout: &GcArrayLayout,
init: ArrayInit<'_>,
) -> ir::Value {
let base_size = builder
.ins()
.iconst(ir::types::I32, i64::from(array_layout.base_size));
let len = init.len(&mut builder.cursor());
let len = builder.ins().uextend(ir::types::I64, len);
let elems_size_64 = builder
.ins()
.imul_imm(len, i64::from(array_layout.elem_size));
let high_bits = builder.ins().ushr_imm(elems_size_64, 32);
func_env.trapnz(builder, high_bits, crate::TRAP_ALLOCATION_TOO_LARGE);
let elems_size = builder.ins().ireduce(ir::types::I32, elems_size_64);
func_env.uadd_overflow_trap(
builder,
base_size,
elems_size,
crate::TRAP_ALLOCATION_TOO_LARGE,
)
}
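/// Initialize a newly-allocated struct's fields, given a raw pointer to the
/// struct in the GC heap. `init_field` emits the write for a single field,
/// with or without barriers as appropriate for the calling context.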
#[cfg_attr(not(any(feature = "gc-drc", feature = "gc-null")), allow(dead_code))]
fn initialize_struct_fields(
func_env: &mut FuncEnvironment<'_>,
builder: &mut FunctionBuilder<'_>,
struct_ty: ModuleInternedTypeIndex,
raw_ptr_to_struct: ir::Value,
field_values: &[ir::Value],
mut init_field: impl FnMut(
&mut FuncEnvironment<'_>,
&mut FunctionBuilder<'_>,
WasmStorageType,
ir::Value,
ir::Value,
) -> WasmResult<()>,
) -> WasmResult<()> {
let struct_layout = func_env.struct_layout(struct_ty);
let struct_size = struct_layout.size;
let field_offsets: SmallVec<[_; 8]> = struct_layout.fields.iter().copied().collect();
assert_eq!(field_offsets.len(), field_values.len());
assert!(!func_env.types[struct_ty].composite_type.shared);
let struct_ty = func_env.types[struct_ty]
.composite_type
.inner
.unwrap_struct();
let field_types: SmallVec<[_; 8]> = struct_ty.fields.iter().cloned().collect();
assert_eq!(field_types.len(), field_values.len());
for ((ty, val), offset) in field_types.into_iter().zip(field_values).zip(field_offsets) {
let size_of_access = wasmtime_environ::byte_size_of_wasm_ty_in_gc_heap(&ty.element_type);
assert!(offset + size_of_access <= struct_size);
let field_addr = builder.ins().iadd_imm(raw_ptr_to_struct, i64::from(offset));
init_field(func_env, builder, ty.element_type, field_addr, *val)?;
}
Ok(())
}
impl FuncEnvironment<'_> {
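/// Get the GC layout for the type at the given index, computing and
/// caching it on first use.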
fn gc_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcLayout {
if !self.ty_to_gc_layout.contains_key(&type_index) {
let ty = &self.types[type_index].composite_type;
let layout = gc_compiler(self)
.unwrap()
.layouts()
.gc_layout(ty)
.expect("should only call `FuncEnvironment::gc_layout` for GC types");
self.ty_to_gc_layout.insert(type_index, layout);
}
self.ty_to_gc_layout.get(&type_index).unwrap()
}
fn array_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcArrayLayout {
self.gc_layout(type_index).unwrap_array()
}
fn struct_layout(&mut self, type_index: ModuleInternedTypeIndex) -> &GcStructLayout {
self.gc_layout(type_index).unwrap_struct()
}
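/// Load the GC heap's base pointer from the `VMContext`.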
fn get_gc_heap_base(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
let ptr_ty = self.pointer_type();
let flags = ir::MemFlags::trusted().with_readonly();
let vmctx = self.vmctx(builder.func);
let vmctx = builder.ins().global_value(ptr_ty, vmctx);
let base_offset = self.offsets.ptr.vmctx_gc_heap_base();
let base_offset = i32::from(base_offset);
builder.ins().load(ptr_ty, flags, vmctx, base_offset)
}
fn get_gc_heap_bound(&mut self, builder: &mut FunctionBuilder) -> ir::Value {
let ptr_ty = self.pointer_type();
let flags = ir::MemFlags::trusted().with_readonly();
let vmctx = self.vmctx(builder.func);
let vmctx = builder.ins().global_value(ptr_ty, vmctx);
let bound_offset = self.offsets.ptr.vmctx_gc_heap_bound();
let bound_offset = i32::from(bound_offset);
builder.ins().load(ptr_ty, flags, vmctx, bound_offset)
}
fn get_gc_heap_base_bound(&mut self, builder: &mut FunctionBuilder) -> (ir::Value, ir::Value) {
let base = self.get_gc_heap_base(builder);
let bound = self.get_gc_heap_bound(builder);
(base, bound)
}
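/// Convert a GC reference (a 32-bit index into the GC heap) into a native
/// address, checking that an access at `offset` satisfies the given bounds
/// check. Failures trap with an internal assertion, since references
/// produced by the collector are always in bounds.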
fn prepare_gc_ref_access(
&mut self,
builder: &mut FunctionBuilder,
gc_ref: ir::Value,
offset: Offset,
check: BoundsCheck,
) -> ir::Value {
log::trace!("prepare_gc_ref_access({gc_ref:?}, {offset:?}, {check:?})");
assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
let pointer_type = self.pointer_type();
let (base, bound) = self.get_gc_heap_base_bound(builder);
let index = uextend_i32_to_pointer_type(builder, pointer_type, gc_ref);
let offset = match offset {
Offset::Dynamic(offset) => uextend_i32_to_pointer_type(builder, pointer_type, offset),
Offset::Static(offset) => builder.ins().iconst(pointer_type, i64::from(offset)),
};
let index_and_offset =
builder
.ins()
.uadd_overflow_trap(index, offset, TRAP_INTERNAL_ASSERT);
let end = match check {
BoundsCheck::Object(object_size) => {
let object_size = uextend_i32_to_pointer_type(builder, pointer_type, object_size);
builder
.ins()
.uadd_overflow_trap(index, object_size, TRAP_INTERNAL_ASSERT)
}
BoundsCheck::Access(access_size) => {
let access_size = builder.ins().iconst(pointer_type, i64::from(access_size));
builder.ins().uadd_overflow_trap(
index_and_offset,
access_size,
TRAP_INTERNAL_ASSERT,
)
}
};
let is_in_bounds =
builder
.ins()
.icmp(ir::condcodes::IntCC::UnsignedLessThanOrEqual, end, bound);
builder.ins().trapz(is_in_bounds, TRAP_INTERNAL_ASSERT);
let result = builder.ins().iadd(base, index_and_offset);
log::trace!("prepare_gc_ref_access(..) -> {result:?}");
result
}
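/// Emit a check for whether `gc_ref` is null or is an `i31` (and therefore
/// has no backing heap object), using the reference's static type to elide
/// impossible cases.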
#[cfg_attr(not(feature = "gc-drc"), allow(dead_code))]
fn gc_ref_is_null_or_i31(
&mut self,
builder: &mut FunctionBuilder,
ty: WasmRefType,
gc_ref: ir::Value,
) -> ir::Value {
assert_eq!(builder.func.dfg.value_type(gc_ref), ir::types::I32);
assert!(ty.is_vmgcref_type_and_not_i31());
let might_be_i31 = match ty.heap_type {
WasmHeapType::I31 => unreachable!(),
WasmHeapType::Any | WasmHeapType::Eq => true,
WasmHeapType::Array
| WasmHeapType::ConcreteArray(_)
| WasmHeapType::Struct
| WasmHeapType::ConcreteStruct(_)
| WasmHeapType::None => false,
WasmHeapType::Extern | WasmHeapType::NoExtern => false,
WasmHeapType::Func | WasmHeapType::ConcreteFunc(_) | WasmHeapType::NoFunc => {
unreachable!()
}
// Continuation types are not yet implemented.
WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(),
};
match (ty.nullable, might_be_i31) {
(false, false) => builder.ins().iconst(ir::types::I32, 0),
(false, true) => builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT)),
(true, false) => builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0),
(true, true) => {
let is_i31 = builder.ins().band_imm(gc_ref, i64::from(I31_DISCRIMINANT));
let is_null = builder.ins().icmp_imm(IntCC::Equal, gc_ref, 0);
let is_null = builder.ins().uextend(ir::types::I32, is_null);
builder.ins().bor(is_i31, is_null)
}
}
}
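/// Emit a dynamic subtype check of type `a` against type `b`, with an
/// inline fast path for exact type equality and a fallback libcall for the
/// full subtyping check.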
pub(crate) fn is_subtype(
&mut self,
builder: &mut FunctionBuilder<'_>,
a: ir::Value,
b: ir::Value,
) -> ir::Value {
log::trace!("is_subtype({a:?}, {b:?})");
let diff_tys_block = builder.create_block();
let continue_block = builder.create_block();
log::trace!("is_subtype: fast path check for exact same types");
let same_ty = builder.ins().icmp(IntCC::Equal, a, b);
let same_ty = builder.ins().uextend(ir::types::I32, same_ty);
builder
.ins()
.brif(same_ty, continue_block, &[same_ty], diff_tys_block, &[]);
builder.switch_to_block(diff_tys_block);
log::trace!("is_subtype: slow path to do full `is_subtype` libcall");
let is_subtype = self.builtin_functions.is_subtype(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let call_inst = builder.ins().call(is_subtype, &[vmctx, a, b]);
let result = builder.func.dfg.first_result(call_inst);
builder.ins().jump(continue_block, &[result]);
builder.switch_to_block(continue_block);
let result = builder.append_block_param(continue_block, ir::types::I32);
log::trace!("is_subtype(..) -> {result:?}");
builder.seal_block(diff_tys_block);
builder.seal_block(continue_block);
result
}
}