use crate::compiler::Compiler;
use crate::translate::{
FuncTranslationState, GlobalVariable, Heap, HeapData, StructFieldsVec, TableData, TableSize,
TargetEnvironment,
};
use crate::{gc, BuiltinFunctionSignatures, TRAP_INTERNAL_ASSERT};
use cranelift_codegen::cursor::FuncCursor;
use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
use cranelift_codegen::ir::immediates::{Imm64, Offset32};
use cranelift_codegen::ir::pcc::Fact;
use cranelift_codegen::ir::types::*;
use cranelift_codegen::ir::{self, types};
use cranelift_codegen::ir::{ArgumentPurpose, Function, InstBuilder, MemFlags};
use cranelift_codegen::isa::{TargetFrontendConfig, TargetIsa};
use cranelift_entity::packed_option::ReservedValue;
use cranelift_entity::{EntityRef, PrimaryMap, SecondaryMap};
use cranelift_frontend::FunctionBuilder;
use cranelift_frontend::Variable;
use smallvec::SmallVec;
use std::mem;
use wasmparser::{Operator, WasmFeatures};
use wasmtime_environ::{
BuiltinFunctionIndex, DataIndex, ElemIndex, EngineOrModuleTypeIndex, FuncIndex, GlobalIndex,
IndexType, Memory, MemoryIndex, Module, ModuleInternedTypeIndex, ModuleTranslation,
ModuleTypesBuilder, PtrSize, Table, TableIndex, TripleExt, Tunables, TypeConvert, TypeIndex,
VMOffsets, WasmCompositeInnerType, WasmFuncType, WasmHeapTopType, WasmHeapType, WasmRefType,
WasmResult, WasmValType,
};
use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK};
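/// Whether a packed struct field or array element narrower than 32 bits should
/// be sign- or zero-extended when loaded (see `translate_struct_get` and
/// `translate_array_get`).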
#[derive(Debug)]
pub(crate) enum Extension {
Sign,
Zero,
}
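/// Lazily-imported `ir::FuncRef`s for Wasmtime's builtin functions, cached so
/// each builtin's signature and external name are declared at most once per
/// compiled function.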
pub(crate) struct BuiltinFunctions {
types: BuiltinFunctionSignatures,
builtins: [Option<ir::FuncRef>; BuiltinFunctionIndex::len() as usize],
}
impl BuiltinFunctions {
fn new(compiler: &Compiler) -> Self {
Self {
types: BuiltinFunctionSignatures::new(compiler),
builtins: [None; BuiltinFunctionIndex::len() as usize],
}
}
fn load_builtin(&mut self, func: &mut Function, index: BuiltinFunctionIndex) -> ir::FuncRef {
let cache = &mut self.builtins[index.index() as usize];
if let Some(f) = cache {
return *f;
}
let signature = func.import_signature(self.types.wasm_signature(index));
let name =
ir::ExternalName::User(func.declare_imported_user_function(ir::UserExternalName {
namespace: crate::NS_WASMTIME_BUILTIN,
index: index.index(),
}));
let f = func.import_function(ir::ExtFuncData {
name,
signature,
colocated: true,
});
*cache = Some(f);
f
}
}
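// Generates one accessor per builtin (driven by `foreach_builtin_function!`
// below) that loads and caches the corresponding `FuncRef` via `load_builtin`.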
macro_rules! declare_function_signatures {
($(
$( #[$attr:meta] )*
$name:ident( $( $pname:ident: $param:ident ),* ) $( -> $result:ident )?;
)*) => {
$(impl BuiltinFunctions {
$( #[$attr] )*
pub(crate) fn $name(&mut self, func: &mut Function) -> ir::FuncRef {
self.load_builtin(func, BuiltinFunctionIndex::$name())
}
})*
};
}
wasmtime_environ::foreach_builtin_function!(declare_function_signatures);
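/// Per-function translation state for lowering a Wasm function body to CLIF:
/// module and type information, lazily created `vmctx`/heap/table accessors,
/// cached builtin function references, and the variables backing fuel and
/// epoch instrumentation.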
pub struct FuncEnvironment<'module_environment> {
compiler: &'module_environment Compiler,
isa: &'module_environment (dyn TargetIsa + 'module_environment),
pub(crate) module: &'module_environment Module,
pub(crate) types: &'module_environment ModuleTypesBuilder,
wasm_func_ty: &'module_environment WasmFuncType,
sig_ref_to_ty: SecondaryMap<ir::SigRef, Option<&'module_environment WasmFuncType>>,
#[cfg(feature = "gc")]
pub(crate) ty_to_gc_layout: std::collections::HashMap<
wasmtime_environ::ModuleInternedTypeIndex,
wasmtime_environ::GcLayout,
>,
#[cfg(feature = "wmemcheck")]
translation: &'module_environment ModuleTranslation<'module_environment>,
heaps: PrimaryMap<Heap, HeapData>,
tables: SecondaryMap<TableIndex, Option<TableData>>,
vmctx: Option<ir::GlobalValue>,
pcc_vmctx_memtype: Option<ir::MemoryType>,
pub(crate) builtin_functions: BuiltinFunctions,
pub(crate) offsets: VMOffsets<u8>,
pub(crate) tunables: &'module_environment Tunables,
fuel_var: cranelift_frontend::Variable,
vmstore_context_ptr: ir::Value,
epoch_deadline_var: cranelift_frontend::Variable,
epoch_ptr_var: cranelift_frontend::Variable,
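/// Fuel consumed by operators translated since the last flush into
/// `fuel_var`; initialized to 1 in `new` so that function entry itself is
/// charged at the first flush.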
fuel_consumed: i64,
pub(crate) stack_limit_at_function_entry: Option<ir::GlobalValue>,
}
impl<'module_environment> FuncEnvironment<'module_environment> {
pub fn new(
compiler: &'module_environment Compiler,
translation: &'module_environment ModuleTranslation<'module_environment>,
types: &'module_environment ModuleTypesBuilder,
wasm_func_ty: &'module_environment WasmFuncType,
) -> Self {
let tunables = compiler.tunables();
let builtin_functions = BuiltinFunctions::new(compiler);
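// Reference the generated `raise` accessor so it is not reported as unused
// in configurations that never call it.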
let _ = BuiltinFunctions::raise;
Self {
isa: compiler.isa(),
module: &translation.module,
compiler,
types,
wasm_func_ty,
sig_ref_to_ty: SecondaryMap::default(),
#[cfg(feature = "gc")]
ty_to_gc_layout: std::collections::HashMap::new(),
heaps: PrimaryMap::default(),
tables: SecondaryMap::default(),
vmctx: None,
pcc_vmctx_memtype: None,
builtin_functions,
offsets: VMOffsets::new(compiler.isa().pointer_bytes(), &translation.module),
tunables,
fuel_var: Variable::new(0),
epoch_deadline_var: Variable::new(0),
epoch_ptr_var: Variable::new(0),
vmstore_context_ptr: ir::Value::reserved_value(),
fuel_consumed: 1,
#[cfg(feature = "wmemcheck")]
translation,
stack_limit_at_function_entry: None,
}
}
pub(crate) fn pointer_type(&self) -> ir::Type {
self.isa.pointer_type()
}
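/// Returns the `GlobalValue` for the `VMContext` pointer, creating it on
/// first use and attaching a proof-carrying-code memory-type fact when PCC
/// is enabled.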
pub(crate) fn vmctx(&mut self, func: &mut Function) -> ir::GlobalValue {
self.vmctx.unwrap_or_else(|| {
let vmctx = func.create_global_value(ir::GlobalValueData::VMContext);
if self.isa.flags().enable_pcc() {
let vmctx_memtype = func.create_memory_type(ir::MemoryTypeData::Struct {
size: 0,
fields: vec![],
});
self.pcc_vmctx_memtype = Some(vmctx_memtype);
func.global_value_facts[vmctx] = Some(Fact::Mem {
ty: vmctx_memtype,
min_offset: 0,
max_offset: 0,
nullable: false,
});
}
self.vmctx = Some(vmctx);
vmctx
})
}
pub(crate) fn vmctx_val(&mut self, pos: &mut FuncCursor<'_>) -> ir::Value {
let pointer_type = self.pointer_type();
let vmctx = self.vmctx(&mut pos.func);
pos.ins().global_value(pointer_type, vmctx)
}
fn get_table_copy_func(
&mut self,
func: &mut Function,
dst_table_index: TableIndex,
src_table_index: TableIndex,
) -> (ir::FuncRef, usize, usize) {
let sig = self.builtin_functions.table_copy(func);
(
sig,
dst_table_index.as_u32() as usize,
src_table_index.as_u32() as usize,
)
}
#[cfg(feature = "threads")]
fn get_memory_atomic_wait(
&mut self,
func: &mut Function,
memory_index: MemoryIndex,
ty: ir::Type,
) -> (ir::FuncRef, usize) {
match ty {
I32 => (
self.builtin_functions.memory_atomic_wait32(func),
memory_index.index(),
),
I64 => (
self.builtin_functions.memory_atomic_wait64(func),
memory_index.index(),
),
x => panic!("get_memory_atomic_wait unsupported type: {x:?}"),
}
}
fn get_global_location(
&mut self,
func: &mut ir::Function,
index: GlobalIndex,
) -> (ir::GlobalValue, i32) {
let pointer_type = self.pointer_type();
let vmctx = self.vmctx(func);
if let Some(def_index) = self.module.defined_global_index(index) {
let offset = i32::try_from(self.offsets.vmctx_vmglobal_definition(def_index)).unwrap();
(vmctx, offset)
} else {
let from_offset = self.offsets.vmctx_vmglobal_import_from(index);
let global = func.create_global_value(ir::GlobalValueData::Load {
base: vmctx,
offset: Offset32::new(i32::try_from(from_offset).unwrap()),
global_type: pointer_type,
flags: MemFlags::trusted().with_readonly(),
});
(global, 0)
}
}
fn declare_vmstore_context_ptr(&mut self, builder: &mut FunctionBuilder<'_>) {
let pointer_type = self.pointer_type();
let vmctx = self.vmctx(builder.func);
let base = builder.ins().global_value(pointer_type, vmctx);
let offset = i32::from(self.offsets.ptr.vmctx_runtime_limits());
debug_assert!(self.vmstore_context_ptr.is_reserved_value());
self.vmstore_context_ptr =
builder
.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset);
}
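/// Fuel instrumentation: the running counter lives in `fuel_var`, is loaded
/// from the store at function entry (and reloaded after calls), accumulated
/// per operator in `fuel_consumed`, flushed back to the store around calls
/// and returns, and counts up toward zero; once it becomes non-negative the
/// `out_of_gas` builtin is invoked.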
fn fuel_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
builder.declare_var(self.fuel_var, ir::types::I64);
self.fuel_load_into_var(builder);
self.fuel_check(builder);
}
fn fuel_function_exit(&mut self, builder: &mut FunctionBuilder<'_>) {
self.fuel_save_from_var(builder);
}
fn fuel_before_op(
&mut self,
op: &Operator<'_>,
builder: &mut FunctionBuilder<'_>,
reachable: bool,
) {
if !reachable {
debug_assert_eq!(self.fuel_consumed, 0);
return;
}
self.fuel_consumed += match op {
Operator::Nop | Operator::Drop => 0,
Operator::Block { .. }
| Operator::Loop { .. }
| Operator::Unreachable
| Operator::Return
| Operator::Else
| Operator::End => 0,
_ => 1,
};
match op {
Operator::Unreachable
| Operator::Return
| Operator::CallIndirect { .. }
| Operator::Call { .. }
| Operator::ReturnCall { .. }
| Operator::ReturnCallRef { .. }
| Operator::ReturnCallIndirect { .. } => {
self.fuel_increment_var(builder);
self.fuel_save_from_var(builder);
}
Operator::Loop { .. }
| Operator::If { .. }
| Operator::Br { .. }
| Operator::BrIf { .. }
| Operator::BrTable { .. }
| Operator::End
| Operator::Else => self.fuel_increment_var(builder),
_ => {}
}
}
fn fuel_after_op(&mut self, op: &Operator<'_>, builder: &mut FunctionBuilder<'_>) {
match op {
Operator::Call { .. } | Operator::CallIndirect { .. } => {
self.fuel_load_into_var(builder);
}
_ => {}
}
}
fn fuel_increment_var(&mut self, builder: &mut FunctionBuilder<'_>) {
let consumption = mem::replace(&mut self.fuel_consumed, 0);
if consumption == 0 {
return;
}
let fuel = builder.use_var(self.fuel_var);
let fuel = builder.ins().iadd_imm(fuel, consumption);
builder.def_var(self.fuel_var, fuel);
}
fn fuel_load_into_var(&mut self, builder: &mut FunctionBuilder<'_>) {
let (addr, offset) = self.fuel_addr_offset();
let fuel = builder
.ins()
.load(ir::types::I64, ir::MemFlags::trusted(), addr, offset);
builder.def_var(self.fuel_var, fuel);
}
fn fuel_save_from_var(&mut self, builder: &mut FunctionBuilder<'_>) {
let (addr, offset) = self.fuel_addr_offset();
let fuel_consumed = builder.use_var(self.fuel_var);
builder
.ins()
.store(ir::MemFlags::trusted(), fuel_consumed, addr, offset);
}
fn fuel_addr_offset(&mut self) -> (ir::Value, ir::immediates::Offset32) {
debug_assert!(!self.vmstore_context_ptr.is_reserved_value());
(
self.vmstore_context_ptr,
i32::from(self.offsets.ptr.vmstore_context_fuel_consumed()).into(),
)
}
fn fuel_check(&mut self, builder: &mut FunctionBuilder) {
self.fuel_increment_var(builder);
let out_of_gas_block = builder.create_block();
let continuation_block = builder.create_block();
let zero = builder.ins().iconst(ir::types::I64, 0);
let fuel = builder.use_var(self.fuel_var);
let cmp = builder
.ins()
.icmp(IntCC::SignedGreaterThanOrEqual, fuel, zero);
builder
.ins()
.brif(cmp, out_of_gas_block, &[], continuation_block, &[]);
builder.seal_block(out_of_gas_block);
builder.switch_to_block(out_of_gas_block);
self.fuel_save_from_var(builder);
let out_of_gas = self.builtin_functions.out_of_gas(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(out_of_gas, &[vmctx]);
self.fuel_load_into_var(builder);
builder.ins().jump(continuation_block, &[]);
builder.seal_block(continuation_block);
builder.switch_to_block(continuation_block);
}
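/// Epoch interruption: the pointer to the engine's current epoch and the
/// cached deadline are kept in variables; each check compares the loaded
/// epoch against the cached deadline and, on expiry, reloads the deadline
/// from the store and calls the `new_epoch` builtin.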
fn epoch_function_entry(&mut self, builder: &mut FunctionBuilder<'_>) {
builder.declare_var(self.epoch_deadline_var, ir::types::I64);
builder.declare_var(self.epoch_ptr_var, self.pointer_type());
let epoch_ptr = self.epoch_ptr(builder);
builder.def_var(self.epoch_ptr_var, epoch_ptr);
let continuation_block = builder.create_block();
let cur_epoch_value = self.epoch_load_current(builder);
self.epoch_check_full(builder, cur_epoch_value, continuation_block);
}
#[cfg(feature = "wmemcheck")]
fn hook_malloc_exit(&mut self, builder: &mut FunctionBuilder, retvals: &[ir::Value]) {
let check_malloc = self.builtin_functions.check_malloc(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let func_args = builder
.func
.dfg
.block_params(builder.func.layout.entry_block().unwrap());
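// Wasm-level arguments start at index 2, after the callee and caller vmctx
// parameters, so a malloc-style callee's size argument is expected there.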
let len = if func_args.len() < 3 {
return;
} else {
func_args[2]
};
let retval = if retvals.len() < 1 {
return;
} else {
retvals[0]
};
builder.ins().call(check_malloc, &[vmctx, retval, len]);
}
#[cfg(feature = "wmemcheck")]
fn hook_free_exit(&mut self, builder: &mut FunctionBuilder) {
let check_free = self.builtin_functions.check_free(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let func_args = builder
.func
.dfg
.block_params(builder.func.layout.entry_block().unwrap());
let ptr = if func_args.len() < 3 {
return;
} else {
func_args[2]
};
builder.ins().call(check_free, &[vmctx, ptr]);
}
fn epoch_ptr(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
let vmctx = self.vmctx(builder.func);
let pointer_type = self.pointer_type();
let base = builder.ins().global_value(pointer_type, vmctx);
let offset = i32::from(self.offsets.ptr.vmctx_epoch_ptr());
let epoch_ptr = builder
.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset);
epoch_ptr
}
fn epoch_load_current(&mut self, builder: &mut FunctionBuilder<'_>) -> ir::Value {
let addr = builder.use_var(self.epoch_ptr_var);
builder.ins().load(
ir::types::I64,
ir::MemFlags::trusted(),
addr,
ir::immediates::Offset32::new(0),
)
}
fn epoch_check(&mut self, builder: &mut FunctionBuilder<'_>) {
let continuation_block = builder.create_block();
let cur_epoch_value = self.epoch_load_current(builder);
self.epoch_check_cached(builder, cur_epoch_value, continuation_block);
self.epoch_check_full(builder, cur_epoch_value, continuation_block);
}
fn epoch_check_cached(
&mut self,
builder: &mut FunctionBuilder,
cur_epoch_value: ir::Value,
continuation_block: ir::Block,
) {
let new_epoch_block = builder.create_block();
builder.set_cold_block(new_epoch_block);
let epoch_deadline = builder.use_var(self.epoch_deadline_var);
let cmp = builder.ins().icmp(
IntCC::UnsignedGreaterThanOrEqual,
cur_epoch_value,
epoch_deadline,
);
builder
.ins()
.brif(cmp, new_epoch_block, &[], continuation_block, &[]);
builder.seal_block(new_epoch_block);
builder.switch_to_block(new_epoch_block);
}
fn epoch_check_full(
&mut self,
builder: &mut FunctionBuilder,
cur_epoch_value: ir::Value,
continuation_block: ir::Block,
) {
let deadline = builder.ins().load(
ir::types::I64,
ir::MemFlags::trusted(),
self.vmstore_context_ptr,
ir::immediates::Offset32::new(self.offsets.ptr.vmstore_context_epoch_deadline() as i32),
);
builder.def_var(self.epoch_deadline_var, deadline);
self.epoch_check_cached(builder, cur_epoch_value, continuation_block);
let new_epoch = self.builtin_functions.new_epoch(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let call = builder.ins().call(new_epoch, &[vmctx]);
let new_deadline = *builder.func.dfg.inst_results(call).first().unwrap();
builder.def_var(self.epoch_deadline_var, new_deadline);
builder.ins().jump(continuation_block, &[]);
builder.seal_block(continuation_block);
builder.switch_to_block(continuation_block);
}
fn memory(&self, index: MemoryIndex) -> Memory {
self.module.memories[index]
}
fn table(&self, index: TableIndex) -> Table {
self.module.tables[index]
}
fn cast_index_to_i64(
&self,
pos: &mut FuncCursor<'_>,
val: ir::Value,
index_type: IndexType,
) -> ir::Value {
match index_type {
IndexType::I32 => pos.ins().uextend(I64, val),
IndexType::I64 => val,
}
}
fn convert_pointer_to_index_type(
&self,
mut pos: FuncCursor<'_>,
val: ir::Value,
index_type: IndexType,
single_byte_pages: bool,
) -> ir::Value {
let desired_type = index_type_to_ir_type(index_type);
let pointer_type = self.pointer_type();
assert_eq!(pos.func.dfg.value_type(val), pointer_type);
if pointer_type == desired_type {
val
} else if pointer_type.bits() > desired_type.bits() {
pos.ins().ireduce(desired_type, val)
} else {
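// The pointer type is narrower than the index type. Sign-extension keeps
// the -1 failure sentinel intact; with single-byte pages the full 32-bit
// range is meaningful, so zero-extend and special-case -1 with a select.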
match single_byte_pages {
false => {
pos.ins().sextend(desired_type, val)
}
true => {
let extended = pos.ins().uextend(desired_type, val);
let neg_one = pos.ins().iconst(desired_type, -1);
let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1);
pos.ins().select(is_failure, neg_one, extended)
}
}
}
}
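/// Lazily populates `self.tables[index]` with the table's base-pointer
/// global value and its bound (static when the table's minimum and maximum
/// sizes are equal, otherwise loaded dynamically), reading either the
/// defined-table slot in the vmctx or through the table-import pointer.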
fn ensure_table_exists(&mut self, func: &mut ir::Function, index: TableIndex) {
if self.tables[index].is_some() {
return;
}
let pointer_type = self.pointer_type();
let (ptr, base_offset, current_elements_offset) = {
let vmctx = self.vmctx(func);
if let Some(def_index) = self.module.defined_table_index(index) {
let base_offset =
i32::try_from(self.offsets.vmctx_vmtable_definition_base(def_index)).unwrap();
let current_elements_offset = i32::try_from(
self.offsets
.vmctx_vmtable_definition_current_elements(def_index),
)
.unwrap();
(vmctx, base_offset, current_elements_offset)
} else {
let from_offset = self.offsets.vmctx_vmtable_import_from(index);
let table = func.create_global_value(ir::GlobalValueData::Load {
base: vmctx,
offset: Offset32::new(i32::try_from(from_offset).unwrap()),
global_type: pointer_type,
flags: MemFlags::trusted().with_readonly(),
});
let base_offset = i32::from(self.offsets.vmtable_definition_base());
let current_elements_offset =
i32::from(self.offsets.vmtable_definition_current_elements());
(table, base_offset, current_elements_offset)
}
};
let table = &self.module.tables[index];
let element_size = if table.ref_type.is_vmgcref_type() {
ir::types::I32.bytes()
} else {
self.reference_type(table.ref_type.heap_type).0.bytes()
};
let base_gv = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(base_offset),
global_type: pointer_type,
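// A table whose size can never change allows its base pointer to be
// loaded as readonly.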
flags: if Some(table.limits.min) == table.limits.max {
MemFlags::trusted().with_readonly()
} else {
MemFlags::trusted()
},
});
let bound = if Some(table.limits.min) == table.limits.max {
TableSize::Static {
bound: table.limits.min,
}
} else {
TableSize::Dynamic {
bound_gv: func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(current_elements_offset),
global_type: ir::Type::int(
u16::from(self.offsets.size_of_vmtable_definition_current_elements()) * 8,
)
.unwrap(),
flags: MemFlags::trusted(),
}),
}
};
self.tables[index] = Some(TableData {
base_gv,
bound,
element_size,
});
}
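/// Loads a funcref table element. With lazy table initialization enabled,
/// the stored value has `FUNCREF_INIT_BIT` masked off when non-null;
/// otherwise the `table_get_lazy_init_func_ref` builtin is called to
/// initialize the element.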
fn get_or_init_func_ref_table_elem(
&mut self,
builder: &mut FunctionBuilder,
table_index: TableIndex,
index: ir::Value,
cold_blocks: bool,
) -> ir::Value {
let pointer_type = self.pointer_type();
self.ensure_table_exists(builder.func, table_index);
let table_data = self.tables[table_index].clone().unwrap();
let (table_entry_addr, flags) = table_data.prepare_table_addr(self, builder, index);
let value = builder.ins().load(pointer_type, flags, table_entry_addr, 0);
if !self.tunables.table_lazy_init {
return value;
}
assert_eq!(FUNCREF_MASK as isize, -2);
let value_masked = builder.ins().band_imm(value, Imm64::from(-2));
let null_block = builder.create_block();
let continuation_block = builder.create_block();
if cold_blocks {
builder.set_cold_block(null_block);
builder.set_cold_block(continuation_block);
}
let result_param = builder.append_block_param(continuation_block, pointer_type);
builder.set_cold_block(null_block);
builder
.ins()
.brif(value, continuation_block, &[value_masked], null_block, &[]);
builder.seal_block(null_block);
builder.switch_to_block(null_block);
let index_type = self.table(table_index).idx_type;
let table_index = builder.ins().iconst(I32, table_index.index() as i64);
let lazy_init = self
.builtin_functions
.table_get_lazy_init_func_ref(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let index = self.cast_index_to_i64(&mut builder.cursor(), index, index_type);
let call_inst = builder.ins().call(lazy_init, &[vmctx, table_index, index]);
let returned_entry = builder.func.dfg.inst_results(call_inst)[0];
builder.ins().jump(continuation_block, &[returned_entry]);
builder.seal_block(continuation_block);
builder.switch_to_block(continuation_block);
result_param
}
#[cfg(feature = "wmemcheck")]
fn check_malloc_start(&mut self, builder: &mut FunctionBuilder) {
let malloc_start = self.builtin_functions.malloc_start(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(malloc_start, &[vmctx]);
}
#[cfg(feature = "wmemcheck")]
fn check_free_start(&mut self, builder: &mut FunctionBuilder) {
let free_start = self.builtin_functions.free_start(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(free_start, &[vmctx]);
}
#[cfg(feature = "wmemcheck")]
fn current_func_name(&self, builder: &mut FunctionBuilder) -> Option<&str> {
let func_index = match &builder.func.name {
ir::UserFuncName::User(user) => FuncIndex::from_u32(user.index),
_ => {
panic!("function name not a UserFuncName::User as expected")
}
};
self.translation
.debuginfo
.name_section
.func_names
.get(&func_index)
.copied()
}
fn create_empty_struct_memtype(&self, func: &mut ir::Function) -> ir::MemoryType {
func.create_memory_type(ir::MemoryTypeData::Struct {
size: 0,
fields: vec![],
})
}
fn add_field_to_memtype(
&self,
func: &mut ir::Function,
memtype: ir::MemoryType,
offset: u32,
pointee: ir::MemoryType,
readonly: bool,
) {
let ptr_size = self.pointer_type().bytes();
match &mut func.memory_types[memtype] {
ir::MemoryTypeData::Struct { size, fields } => {
*size = std::cmp::max(*size, offset.checked_add(ptr_size).unwrap().into());
fields.push(ir::MemoryTypeField {
ty: self.pointer_type(),
offset: offset.into(),
readonly,
fact: Some(ir::Fact::Mem {
ty: pointee,
min_offset: 0,
max_offset: 0,
nullable: false,
}),
});
fields.sort_by_key(|f| f.offset);
}
_ => panic!("Cannot add field to non-struct memtype"),
}
}
fn load_pointer_with_memtypes(
&self,
func: &mut ir::Function,
value: ir::GlobalValue,
offset: u32,
readonly: bool,
memtype: Option<ir::MemoryType>,
) -> (ir::GlobalValue, Option<ir::MemoryType>) {
let pointee = func.create_global_value(ir::GlobalValueData::Load {
base: value,
offset: Offset32::new(i32::try_from(offset).unwrap()),
global_type: self.pointer_type(),
flags: MemFlags::trusted().with_readonly(),
});
let mt = memtype.map(|mt| {
let pointee_mt = self.create_empty_struct_memtype(func);
self.add_field_to_memtype(func, mt, offset, pointee_mt, readonly);
func.global_value_facts[pointee] = Some(Fact::Mem {
ty: pointee_mt,
min_offset: 0,
max_offset: 0,
nullable: false,
});
pointee_mt
});
(pointee, mt)
}
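/// Emits an explicit conditional branch to a cold trapping block; used when
/// CLIF instructions themselves are not allowed to trap
/// (`clif_instruction_traps_enabled()` is false).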
pub fn conditionally_trap(
&mut self,
builder: &mut FunctionBuilder,
trap_cond: ir::Value,
trap: ir::TrapCode,
) {
assert!(!self.clif_instruction_traps_enabled());
let trap_block = builder.create_block();
builder.set_cold_block(trap_block);
let continuation_block = builder.create_block();
builder
.ins()
.brif(trap_cond, trap_block, &[], continuation_block, &[]);
builder.seal_block(trap_block);
builder.seal_block(continuation_block);
builder.switch_to_block(trap_block);
self.trap(builder, trap);
builder.switch_to_block(continuation_block);
}
fn guard_zero_divisor(&mut self, builder: &mut FunctionBuilder, rhs: ir::Value) {
if self.clif_instruction_traps_enabled() {
return;
}
self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);
}
fn guard_signed_divide(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) {
if self.clif_instruction_traps_enabled() {
return;
}
self.trapz(builder, rhs, ir::TrapCode::INTEGER_DIVISION_BY_ZERO);
let ty = builder.func.dfg.value_type(rhs);
let minus_one = builder.ins().iconst(ty, -1);
let rhs_is_minus_one = builder.ins().icmp(IntCC::Equal, rhs, minus_one);
let int_min = builder.ins().iconst(
ty,
match ty {
I32 => i64::from(i32::MIN),
I64 => i64::MIN,
_ => unreachable!(),
},
);
let lhs_is_int_min = builder.ins().icmp(IntCC::Equal, lhs, int_min);
let is_integer_overflow = builder.ins().band(rhs_is_minus_one, lhs_is_int_min);
self.conditionally_trap(builder, is_integer_overflow, ir::TrapCode::INTEGER_OVERFLOW);
}
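/// Emits explicit NaN and range checks before a float-to-int conversion,
/// trapping with `BAD_CONVERSION_TO_INTEGER` or `INTEGER_OVERFLOW`, for
/// configurations where the conversion instruction itself must not trap.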
fn guard_fcvt_to_int(
&mut self,
builder: &mut FunctionBuilder,
ty: ir::Type,
val: ir::Value,
range32: (f64, f64),
range64: (f64, f64),
) {
assert!(!self.clif_instruction_traps_enabled());
let val_ty = builder.func.dfg.value_type(val);
let val = if val_ty == F64 {
val
} else {
builder.ins().fpromote(F64, val)
};
let isnan = builder.ins().fcmp(FloatCC::NotEqual, val, val);
self.trapnz(builder, isnan, ir::TrapCode::BAD_CONVERSION_TO_INTEGER);
let val = builder.ins().trunc(val);
let (lower_bound, upper_bound) = match ty {
I32 => range32,
I64 => range64,
_ => unreachable!(),
};
let lower_bound = builder.ins().f64const(lower_bound);
let too_small = builder
.ins()
.fcmp(FloatCC::LessThanOrEqual, val, lower_bound);
self.trapnz(builder, too_small, ir::TrapCode::INTEGER_OVERFLOW);
let upper_bound = builder.ins().f64const(upper_bound);
let too_large = builder
.ins()
.fcmp(FloatCC::GreaterThanOrEqual, val, upper_bound);
self.trapnz(builder, too_large, ir::TrapCode::INTEGER_OVERFLOW);
}
pub(crate) fn vmshared_type_index_ty(&self) -> Type {
Type::int_with_byte_size(self.offsets.size_of_vmshared_type_index().into()).unwrap()
}
pub(crate) fn module_interned_to_shared_ty(
&mut self,
pos: &mut FuncCursor,
interned_ty: ModuleInternedTypeIndex,
) -> ir::Value {
let vmctx = self.vmctx_val(pos);
let pointer_type = self.pointer_type();
let mem_flags = ir::MemFlags::trusted().with_readonly();
let shared_indices = pos.ins().load(
pointer_type,
mem_flags,
vmctx,
i32::from(self.offsets.ptr.vmctx_type_ids_array()),
);
let ty = self.vmshared_type_index_ty();
let offset = i32::try_from(interned_ty.as_u32().checked_mul(ty.bytes()).unwrap()).unwrap();
pos.ins().load(ty, mem_flags, shared_indices, offset)
}
pub(crate) fn load_funcref_type_index(
&mut self,
pos: &mut FuncCursor,
mem_flags: ir::MemFlags,
funcref: ir::Value,
) -> ir::Value {
let ty = self.vmshared_type_index_ty();
pos.ins().load(
ty,
mem_flags,
funcref,
i32::from(self.offsets.ptr.vm_func_ref_type_index()),
)
}
}
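/// Helper pairing a `FunctionBuilder` with the `FuncEnvironment` to lower
/// direct, indirect, and `call_ref` calls; `tail` selects
/// `return_call`/`return_call_indirect` lowering.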
struct Call<'a, 'func, 'module_env> {
builder: &'a mut FunctionBuilder<'func>,
env: &'a mut FuncEnvironment<'module_env>,
tail: bool,
}
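/// Outcome of the `call_indirect` signature check: a runtime type-id
/// comparison is required, the types statically match (possibly still
/// needing a null check), or the call statically traps.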
enum CheckIndirectCallTypeSignature {
Runtime,
StaticMatch {
may_be_null: bool,
},
StaticTrap,
}
impl<'a, 'func, 'module_env> Call<'a, 'func, 'module_env> {
pub fn new(
builder: &'a mut FunctionBuilder<'func>,
env: &'a mut FuncEnvironment<'module_env>,
) -> Self {
Call {
builder,
env,
tail: false,
}
}
pub fn new_tail(
builder: &'a mut FunctionBuilder<'func>,
env: &'a mut FuncEnvironment<'module_env>,
) -> Self {
Call {
builder,
env,
tail: true,
}
}
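/// Lowers a direct call. Locally defined callees take the caller's vmctx
/// for both vmctx arguments and are called directly; imported callees have
/// their `wasm_call` pointer and vmctx loaded from the `VMFunctionImport`
/// and are called indirectly.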
pub fn direct_call(
mut self,
callee_index: FuncIndex,
callee: ir::FuncRef,
call_args: &[ir::Value],
) -> WasmResult<ir::Inst> {
let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
let caller_vmctx = self
.builder
.func
.special_param(ArgumentPurpose::VMContext)
.unwrap();
if !self.env.module.is_imported_function(callee_index) {
real_call_args.push(caller_vmctx);
real_call_args.push(caller_vmctx);
real_call_args.extend_from_slice(call_args);
return Ok(self.direct_call_inst(callee, &real_call_args));
}
let pointer_type = self.env.pointer_type();
let sig_ref = self.builder.func.dfg.ext_funcs[callee].signature;
let vmctx = self.env.vmctx(self.builder.func);
let base = self.builder.ins().global_value(pointer_type, vmctx);
let mem_flags = ir::MemFlags::trusted().with_readonly();
let body_offset = i32::try_from(
self.env
.offsets
.vmctx_vmfunction_import_wasm_call(callee_index),
)
.unwrap();
let func_addr = self
.builder
.ins()
.load(pointer_type, mem_flags, base, body_offset);
let vmctx_offset =
i32::try_from(self.env.offsets.vmctx_vmfunction_import_vmctx(callee_index)).unwrap();
let vmctx = self
.builder
.ins()
.load(pointer_type, mem_flags, base, vmctx_offset);
real_call_args.push(vmctx);
real_call_args.push(caller_vmctx);
real_call_args.extend_from_slice(call_args);
Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
}
pub fn indirect_call(
mut self,
features: &WasmFeatures,
table_index: TableIndex,
ty_index: TypeIndex,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<Option<ir::Inst>> {
let (code_ptr, callee_vmctx) = match self.check_and_load_code_and_callee_vmctx(
features,
table_index,
ty_index,
callee,
false,
)? {
Some(pair) => pair,
None => return Ok(None),
};
self.unchecked_call_impl(sig_ref, code_ptr, callee_vmctx, call_args)
.map(Some)
}
fn check_and_load_code_and_callee_vmctx(
&mut self,
features: &WasmFeatures,
table_index: TableIndex,
ty_index: TypeIndex,
callee: ir::Value,
cold_blocks: bool,
) -> WasmResult<Option<(ir::Value, ir::Value)>> {
let funcref_ptr = self.env.get_or_init_func_ref_table_elem(
self.builder,
table_index,
callee,
cold_blocks,
);
let check =
self.check_indirect_call_type_signature(features, table_index, ty_index, funcref_ptr);
let trap_code = match check {
CheckIndirectCallTypeSignature::Runtime => None,
CheckIndirectCallTypeSignature::StaticMatch { may_be_null } => {
if may_be_null {
Some(crate::TRAP_INDIRECT_CALL_TO_NULL)
} else {
None
}
}
CheckIndirectCallTypeSignature::StaticTrap => return Ok(None),
};
Ok(Some(self.load_code_and_vmctx(funcref_ptr, trap_code)))
}
fn check_indirect_call_type_signature(
&mut self,
features: &WasmFeatures,
table_index: TableIndex,
ty_index: TypeIndex,
funcref_ptr: ir::Value,
) -> CheckIndirectCallTypeSignature {
let table = &self.env.module.tables[table_index];
let sig_id_size = self.env.offsets.size_of_vmshared_type_index();
let sig_id_type = Type::int(u16::from(sig_id_size) * 8).unwrap();
match table.ref_type.heap_type {
WasmHeapType::Func => {}
WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Module(table_ty)) => {
let specified_ty = self.env.module.types[ty_index].unwrap_module_type_index();
if specified_ty == table_ty {
return CheckIndirectCallTypeSignature::StaticMatch {
may_be_null: table.ref_type.nullable,
};
}
if features.gc() {
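// With GC enabled the callee's type may still be a subtype of the
// expected type, so fall through to the runtime signature check below.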
} else {
if table.ref_type.nullable {
if self.env.clif_memory_traps_enabled() {
let mem_flags = ir::MemFlags::trusted().with_readonly();
self.builder.ins().load(
sig_id_type,
mem_flags.with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL)),
funcref_ptr,
i32::from(self.env.offsets.ptr.vm_func_ref_type_index()),
);
} else {
self.env.trapz(
self.builder,
funcref_ptr,
crate::TRAP_INDIRECT_CALL_TO_NULL,
);
}
}
self.env.trap(self.builder, crate::TRAP_BAD_SIGNATURE);
return CheckIndirectCallTypeSignature::StaticTrap;
}
}
WasmHeapType::NoFunc => {
assert!(table.ref_type.nullable);
self.env
.trap(self.builder, crate::TRAP_INDIRECT_CALL_TO_NULL);
return CheckIndirectCallTypeSignature::StaticTrap;
}
WasmHeapType::Cont | WasmHeapType::ConcreteCont(_) | WasmHeapType::NoCont => todo!(),
WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::Engine(_))
| WasmHeapType::ConcreteFunc(EngineOrModuleTypeIndex::RecGroup(_))
| WasmHeapType::Extern
| WasmHeapType::NoExtern
| WasmHeapType::Any
| WasmHeapType::Eq
| WasmHeapType::I31
| WasmHeapType::Array
| WasmHeapType::ConcreteArray(_)
| WasmHeapType::Struct
| WasmHeapType::ConcreteStruct(_)
| WasmHeapType::None => {
unreachable!()
}
}
let interned_ty = self.env.module.types[ty_index].unwrap_module_type_index();
let caller_sig_id = self
.env
.module_interned_to_shared_ty(&mut self.builder.cursor(), interned_ty);
let mut mem_flags = ir::MemFlags::trusted().with_readonly();
if self.env.clif_memory_traps_enabled() {
mem_flags = mem_flags.with_trap_code(Some(crate::TRAP_INDIRECT_CALL_TO_NULL));
} else {
self.env
.trapz(self.builder, funcref_ptr, crate::TRAP_INDIRECT_CALL_TO_NULL);
}
let callee_sig_id =
self.env
.load_funcref_type_index(&mut self.builder.cursor(), mem_flags, funcref_ptr);
let matches = if features.gc() {
#[cfg(feature = "gc")]
{
self.env
.is_subtype(self.builder, callee_sig_id, caller_sig_id)
}
#[cfg(not(feature = "gc"))]
{
unreachable!()
}
} else {
self.builder
.ins()
.icmp(IntCC::Equal, callee_sig_id, caller_sig_id)
};
self.env
.trapz(self.builder, matches, crate::TRAP_BAD_SIGNATURE);
CheckIndirectCallTypeSignature::Runtime
}
pub fn call_ref(
mut self,
sig_ref: ir::SigRef,
callee: ir::Value,
args: &[ir::Value],
) -> WasmResult<ir::Inst> {
let callee_load_trap_code = Some(crate::TRAP_NULL_REFERENCE);
self.unchecked_call(sig_ref, callee, callee_load_trap_code, args)
}
fn unchecked_call(
&mut self,
sig_ref: ir::SigRef,
callee: ir::Value,
callee_load_trap_code: Option<ir::TrapCode>,
call_args: &[ir::Value],
) -> WasmResult<ir::Inst> {
let (func_addr, callee_vmctx) = self.load_code_and_vmctx(callee, callee_load_trap_code);
self.unchecked_call_impl(sig_ref, func_addr, callee_vmctx, call_args)
}
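/// Loads the callee's `wasm_call` code pointer and vmctx out of a
/// `VMFuncRef`. A null-funcref check is folded into the load's trap code
/// when CLIF memory traps are enabled; otherwise an explicit `trapz` is
/// emitted first.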
fn load_code_and_vmctx(
&mut self,
callee: ir::Value,
callee_load_trap_code: Option<ir::TrapCode>,
) -> (ir::Value, ir::Value) {
let pointer_type = self.env.pointer_type();
let mem_flags = ir::MemFlags::trusted().with_readonly();
let mut callee_flags = mem_flags;
if self.env.clif_memory_traps_enabled() {
callee_flags = callee_flags.with_trap_code(callee_load_trap_code);
} else {
if let Some(trap) = callee_load_trap_code {
self.env.trapz(self.builder, callee, trap);
}
}
let func_addr = self.builder.ins().load(
pointer_type,
callee_flags,
callee,
i32::from(self.env.offsets.ptr.vm_func_ref_wasm_call()),
);
let callee_vmctx = self.builder.ins().load(
pointer_type,
mem_flags,
callee,
i32::from(self.env.offsets.ptr.vm_func_ref_vmctx()),
);
(func_addr, callee_vmctx)
}
fn unchecked_call_impl(
&mut self,
sig_ref: ir::SigRef,
func_addr: ir::Value,
callee_vmctx: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<ir::Inst> {
let mut real_call_args = Vec::with_capacity(call_args.len() + 2);
let caller_vmctx = self
.builder
.func
.special_param(ArgumentPurpose::VMContext)
.unwrap();
real_call_args.push(callee_vmctx);
real_call_args.push(caller_vmctx);
real_call_args.extend_from_slice(call_args);
Ok(self.indirect_call_inst(sig_ref, func_addr, &real_call_args))
}
fn direct_call_inst(&mut self, callee: ir::FuncRef, args: &[ir::Value]) -> ir::Inst {
if self.tail {
self.builder.ins().return_call(callee, args)
} else {
let inst = self.builder.ins().call(callee, args);
let results: SmallVec<[_; 4]> = self
.builder
.func
.dfg
.inst_results(inst)
.iter()
.copied()
.collect();
for (i, val) in results.into_iter().enumerate() {
if self
.env
.func_ref_result_needs_stack_map(&self.builder.func, callee, i)
{
self.builder.declare_value_needs_stack_map(val);
}
}
inst
}
}
fn indirect_call_inst(
&mut self,
sig_ref: ir::SigRef,
func_addr: ir::Value,
args: &[ir::Value],
) -> ir::Inst {
if self.tail {
self.builder
.ins()
.return_call_indirect(sig_ref, func_addr, args)
} else {
let inst = self.builder.ins().call_indirect(sig_ref, func_addr, args);
let results: SmallVec<[_; 4]> = self
.builder
.func
.dfg
.inst_results(inst)
.iter()
.copied()
.collect();
for (i, val) in results.into_iter().enumerate() {
if self.env.sig_ref_result_needs_stack_map(sig_ref, i) {
self.builder.declare_value_needs_stack_map(val);
}
}
inst
}
}
}
impl TypeConvert for FuncEnvironment<'_> {
fn lookup_heap_type(&self, ty: wasmparser::UnpackedIndex) -> WasmHeapType {
wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
self.module.types[idx].unwrap_module_type_index()
})
.lookup_heap_type(ty)
}
fn lookup_type_index(&self, index: wasmparser::UnpackedIndex) -> EngineOrModuleTypeIndex {
wasmtime_environ::WasmparserTypeConverter::new(self.types, |idx| {
self.module.types[idx].unwrap_module_type_index()
})
.lookup_type_index(index)
}
}
impl<'module_environment> TargetEnvironment for FuncEnvironment<'module_environment> {
fn target_config(&self) -> TargetFrontendConfig {
self.isa.frontend_config()
}
fn reference_type(&self, wasm_ty: WasmHeapType) -> (ir::Type, bool) {
let ty = crate::reference_type(wasm_ty, self.pointer_type());
let needs_stack_map = match wasm_ty.top() {
WasmHeapTopType::Extern | WasmHeapTopType::Any => true,
WasmHeapTopType::Func => false,
WasmHeapTopType::Cont => todo!(),
};
(ty, needs_stack_map)
}
fn heap_access_spectre_mitigation(&self) -> bool {
self.isa.flags().enable_heap_access_spectre_mitigation()
}
fn proof_carrying_code(&self) -> bool {
self.isa.flags().enable_pcc()
}
fn tunables(&self) -> &Tunables {
self.compiler.tunables()
}
}
impl FuncEnvironment<'_> {
pub fn heaps(&self) -> &PrimaryMap<Heap, HeapData> {
&self.heaps
}
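/// The first two parameters of the wasm calling convention are the callee
/// and caller vmctx pointers, so wasm-level parameters start at index 2.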
pub fn is_wasm_parameter(&self, _signature: &ir::Signature, index: usize) -> bool {
index >= 2
}
pub fn param_needs_stack_map(&self, _signature: &ir::Signature, index: usize) -> bool {
if index < 2 {
return false;
}
self.wasm_func_ty.params()[index - 2].is_vmgcref_type_and_not_i31()
}
pub fn sig_ref_result_needs_stack_map(&self, sig_ref: ir::SigRef, index: usize) -> bool {
let wasm_func_ty = self.sig_ref_to_ty[sig_ref].as_ref().unwrap();
wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31()
}
pub fn func_ref_result_needs_stack_map(
&self,
func: &ir::Function,
func_ref: ir::FuncRef,
index: usize,
) -> bool {
let sig_ref = func.dfg.ext_funcs[func_ref].signature;
let wasm_func_ty = self.sig_ref_to_ty[sig_ref].as_ref().unwrap();
wasm_func_ty.returns()[index].is_vmgcref_type_and_not_i31()
}
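/// Called once the number of Wasm locals is known so the fuel and epoch
/// variables receive indices that do not collide with them.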
pub fn after_locals(&mut self, num_locals: usize) {
self.fuel_var = Variable::new(num_locals);
self.epoch_deadline_var = Variable::new(num_locals + 1);
self.epoch_ptr_var = Variable::new(num_locals + 2);
}
pub fn translate_table_grow(
&mut self,
builder: &mut FunctionBuilder<'_>,
table_index: TableIndex,
delta: ir::Value,
init_value: ir::Value,
) -> WasmResult<ir::Value> {
let mut pos = builder.cursor();
let table = self.table(table_index);
let ty = table.ref_type.heap_type;
let grow = if ty.is_vmgcref_type() {
gc::builtins::table_grow_gc_ref(self, &mut pos.func)?
} else {
debug_assert_eq!(ty.top(), WasmHeapTopType::Func);
self.builtin_functions.table_grow_func_ref(&mut pos.func)
};
let vmctx = self.vmctx_val(&mut pos);
let index_type = table.idx_type;
let delta = self.cast_index_to_i64(&mut pos, delta, index_type);
let table_index_arg = pos.ins().iconst(I32, table_index.as_u32() as i64);
let call_inst = pos
.ins()
.call(grow, &[vmctx, table_index_arg, delta, init_value]);
let result = pos.func.dfg.first_result(call_inst);
Ok(self.convert_pointer_to_index_type(builder.cursor(), result, index_type, false))
}
pub fn translate_table_get(
&mut self,
builder: &mut FunctionBuilder,
table_index: TableIndex,
index: ir::Value,
) -> WasmResult<ir::Value> {
let table = self.module.tables[table_index];
self.ensure_table_exists(builder.func, table_index);
let table_data = self.tables[table_index].clone().unwrap();
let heap_ty = table.ref_type.heap_type;
match heap_ty.top() {
WasmHeapTopType::Any | WasmHeapTopType::Extern => {
let (src, flags) = table_data.prepare_table_addr(self, builder, index);
gc::gc_compiler(self)?.translate_read_gc_reference(
self,
builder,
table.ref_type,
src,
flags,
)
}
WasmHeapTopType::Func => {
Ok(self.get_or_init_func_ref_table_elem(builder, table_index, index, false))
}
WasmHeapTopType::Cont => todo!(),
}
}
pub fn translate_table_set(
&mut self,
builder: &mut FunctionBuilder,
table_index: TableIndex,
value: ir::Value,
index: ir::Value,
) -> WasmResult<()> {
let table = self.module.tables[table_index];
self.ensure_table_exists(builder.func, table_index);
let table_data = self.tables[table_index].clone().unwrap();
let heap_ty = table.ref_type.heap_type;
match heap_ty.top() {
WasmHeapTopType::Any | WasmHeapTopType::Extern => {
let (dst, flags) = table_data.prepare_table_addr(self, builder, index);
gc::gc_compiler(self)?.translate_write_gc_reference(
self,
builder,
table.ref_type,
dst,
value,
flags,
)
}
WasmHeapTopType::Func => {
let (elem_addr, flags) = table_data.prepare_table_addr(self, builder, index);
let value_with_init_bit = if self.tunables.table_lazy_init {
builder
.ins()
.bor_imm(value, Imm64::from(FUNCREF_INIT_BIT as i64))
} else {
value
};
builder
.ins()
.store(flags, value_with_init_bit, elem_addr, 0);
Ok(())
}
WasmHeapTopType::Cont => todo!(),
}
}
pub fn translate_table_fill(
&mut self,
builder: &mut FunctionBuilder<'_>,
table_index: TableIndex,
dst: ir::Value,
val: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let table = self.table(table_index);
let index_type = table.idx_type;
let dst = self.cast_index_to_i64(&mut pos, dst, index_type);
let len = self.cast_index_to_i64(&mut pos, len, index_type);
let ty = table.ref_type.heap_type;
let libcall = if ty.is_vmgcref_type() {
gc::builtins::table_fill_gc_ref(self, &mut pos.func)?
} else {
debug_assert_eq!(ty.top(), WasmHeapTopType::Func);
self.builtin_functions.table_fill_func_ref(&mut pos.func)
};
let vmctx = self.vmctx_val(&mut pos);
let table_index_arg = pos.ins().iconst(I32, table_index.as_u32() as i64);
pos.ins()
.call(libcall, &[vmctx, table_index_arg, dst, val, len]);
Ok(())
}
pub fn translate_ref_i31(
&mut self,
mut pos: FuncCursor,
val: ir::Value,
) -> WasmResult<ir::Value> {
debug_assert_eq!(pos.func.dfg.value_type(val), ir::types::I32);
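// An i31ref is the 32-bit payload shifted left by one with the
// discriminant bit set in the low bit.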
let shifted = pos.ins().ishl_imm(val, 1);
let tagged = pos
.ins()
.bor_imm(shifted, i64::from(crate::I31_REF_DISCRIMINANT));
let (ref_ty, _needs_stack_map) = self.reference_type(WasmHeapType::I31);
debug_assert_eq!(ref_ty, ir::types::I32);
Ok(tagged)
}
pub fn translate_i31_get_s(
&mut self,
builder: &mut FunctionBuilder,
i31ref: ir::Value,
) -> WasmResult<ir::Value> {
self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
Ok(builder.ins().sshr_imm(i31ref, 1))
}
pub fn translate_i31_get_u(
&mut self,
builder: &mut FunctionBuilder,
i31ref: ir::Value,
) -> WasmResult<ir::Value> {
self.trapz(builder, i31ref, crate::TRAP_NULL_REFERENCE);
Ok(builder.ins().ushr_imm(i31ref, 1))
}
pub fn struct_fields_len(&mut self, struct_type_index: TypeIndex) -> WasmResult<usize> {
let ty = self.module.types[struct_type_index].unwrap_module_type_index();
match &self.types[ty].composite_type.inner {
WasmCompositeInnerType::Struct(s) => Ok(s.fields.len()),
_ => unreachable!(),
}
}
pub fn translate_struct_new(
&mut self,
builder: &mut FunctionBuilder,
struct_type_index: TypeIndex,
fields: StructFieldsVec,
) -> WasmResult<ir::Value> {
gc::translate_struct_new(self, builder, struct_type_index, &fields)
}
pub fn translate_struct_new_default(
&mut self,
builder: &mut FunctionBuilder,
struct_type_index: TypeIndex,
) -> WasmResult<ir::Value> {
gc::translate_struct_new_default(self, builder, struct_type_index)
}
pub fn translate_struct_get(
&mut self,
builder: &mut FunctionBuilder,
struct_type_index: TypeIndex,
field_index: u32,
struct_ref: ir::Value,
extension: Option<Extension>,
) -> WasmResult<ir::Value> {
gc::translate_struct_get(
self,
builder,
struct_type_index,
field_index,
struct_ref,
extension,
)
}
pub fn translate_struct_set(
&mut self,
builder: &mut FunctionBuilder,
struct_type_index: TypeIndex,
field_index: u32,
struct_ref: ir::Value,
value: ir::Value,
) -> WasmResult<()> {
gc::translate_struct_set(
self,
builder,
struct_type_index,
field_index,
struct_ref,
value,
)
}
pub fn translate_array_new(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
elem: ir::Value,
len: ir::Value,
) -> WasmResult<ir::Value> {
gc::translate_array_new(self, builder, array_type_index, elem, len)
}
pub fn translate_array_new_default(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
len: ir::Value,
) -> WasmResult<ir::Value> {
gc::translate_array_new_default(self, builder, array_type_index, len)
}
pub fn translate_array_new_fixed(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
elems: &[ir::Value],
) -> WasmResult<ir::Value> {
gc::translate_array_new_fixed(self, builder, array_type_index, elems)
}
pub fn translate_array_new_data(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
data_index: DataIndex,
data_offset: ir::Value,
len: ir::Value,
) -> WasmResult<ir::Value> {
let libcall = gc::builtins::array_new_data(self, builder.func)?;
let vmctx = self.vmctx_val(&mut builder.cursor());
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
let interned_type_index = builder
.ins()
.iconst(I32, i64::from(interned_type_index.as_u32()));
let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
let call_inst = builder.ins().call(
libcall,
&[vmctx, interned_type_index, data_index, data_offset, len],
);
let result = builder.func.dfg.first_result(call_inst);
Ok(builder.ins().ireduce(ir::types::I32, result))
}
pub fn translate_array_new_elem(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
elem_index: ElemIndex,
elem_offset: ir::Value,
len: ir::Value,
) -> WasmResult<ir::Value> {
let libcall = gc::builtins::array_new_elem(self, builder.func)?;
let vmctx = self.vmctx_val(&mut builder.cursor());
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
let interned_type_index = builder
.ins()
.iconst(I32, i64::from(interned_type_index.as_u32()));
let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
let call_inst = builder.ins().call(
libcall,
&[vmctx, interned_type_index, elem_index, elem_offset, len],
);
let result = builder.func.dfg.first_result(call_inst);
Ok(builder.ins().ireduce(ir::types::I32, result))
}
pub fn translate_array_copy(
&mut self,
builder: &mut FunctionBuilder,
_dst_array_type_index: TypeIndex,
dst_array: ir::Value,
dst_index: ir::Value,
_src_array_type_index: TypeIndex,
src_array: ir::Value,
src_index: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let libcall = gc::builtins::array_copy(self, builder.func)?;
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(
libcall,
&[vmctx, dst_array, dst_index, src_array, src_index, len],
);
Ok(())
}
pub fn translate_array_fill(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
array: ir::Value,
index: ir::Value,
value: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
gc::translate_array_fill(self, builder, array_type_index, array, index, value, len)
}
pub fn translate_array_init_data(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
array: ir::Value,
dst_index: ir::Value,
data_index: DataIndex,
data_offset: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let libcall = gc::builtins::array_init_data(self, builder.func)?;
let vmctx = self.vmctx_val(&mut builder.cursor());
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
let interned_type_index = builder
.ins()
.iconst(I32, i64::from(interned_type_index.as_u32()));
let data_index = builder.ins().iconst(I32, i64::from(data_index.as_u32()));
builder.ins().call(
libcall,
&[
vmctx,
interned_type_index,
array,
dst_index,
data_index,
data_offset,
len,
],
);
Ok(())
}
pub fn translate_array_init_elem(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
array: ir::Value,
dst_index: ir::Value,
elem_index: ElemIndex,
elem_offset: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let libcall = gc::builtins::array_init_elem(self, builder.func)?;
let vmctx = self.vmctx_val(&mut builder.cursor());
let interned_type_index = self.module.types[array_type_index].unwrap_module_type_index();
let interned_type_index = builder
.ins()
.iconst(I32, i64::from(interned_type_index.as_u32()));
let elem_index = builder.ins().iconst(I32, i64::from(elem_index.as_u32()));
builder.ins().call(
libcall,
&[
vmctx,
interned_type_index,
array,
dst_index,
elem_index,
elem_offset,
len,
],
);
Ok(())
}
pub fn translate_array_len(
&mut self,
builder: &mut FunctionBuilder,
array: ir::Value,
) -> WasmResult<ir::Value> {
gc::translate_array_len(self, builder, array)
}
pub fn translate_array_get(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
array: ir::Value,
index: ir::Value,
extension: Option<Extension>,
) -> WasmResult<ir::Value> {
gc::translate_array_get(self, builder, array_type_index, array, index, extension)
}
pub fn translate_array_set(
&mut self,
builder: &mut FunctionBuilder,
array_type_index: TypeIndex,
array: ir::Value,
index: ir::Value,
value: ir::Value,
) -> WasmResult<()> {
gc::translate_array_set(self, builder, array_type_index, array, index, value)
}
pub fn translate_ref_test(
&mut self,
builder: &mut FunctionBuilder<'_>,
ref_ty: WasmRefType,
gc_ref: ir::Value,
) -> WasmResult<ir::Value> {
gc::translate_ref_test(self, builder, ref_ty, gc_ref)
}
pub fn translate_ref_null(
&mut self,
mut pos: cranelift_codegen::cursor::FuncCursor,
ht: WasmHeapType,
) -> WasmResult<ir::Value> {
Ok(match ht.top() {
WasmHeapTopType::Func => pos.ins().iconst(self.pointer_type(), 0),
WasmHeapTopType::Any | WasmHeapTopType::Extern => pos.ins().iconst(types::I32, 0),
WasmHeapTopType::Cont => todo!(),
})
}
pub fn translate_ref_is_null(
&mut self,
mut pos: cranelift_codegen::cursor::FuncCursor,
value: ir::Value,
) -> WasmResult<ir::Value> {
let byte_is_null =
pos.ins()
.icmp_imm(cranelift_codegen::ir::condcodes::IntCC::Equal, value, 0);
Ok(pos.ins().uextend(ir::types::I32, byte_is_null))
}
pub fn translate_ref_func(
&mut self,
mut pos: cranelift_codegen::cursor::FuncCursor<'_>,
func_index: FuncIndex,
) -> WasmResult<ir::Value> {
let func_index = pos.ins().iconst(I32, func_index.as_u32() as i64);
let ref_func = self.builtin_functions.ref_func(&mut pos.func);
let vmctx = self.vmctx_val(&mut pos);
let call_inst = pos.ins().call(ref_func, &[vmctx, func_index]);
Ok(pos.func.dfg.first_result(call_inst))
}
pub fn translate_custom_global_get(
&mut self,
builder: &mut FunctionBuilder,
index: GlobalIndex,
) -> WasmResult<ir::Value> {
let ty = self.module.globals[index].wasm_ty;
debug_assert!(
ty.is_vmgcref_type(),
"We only use GlobalVariable::Custom for VMGcRef types"
);
let WasmValType::Ref(ty) = ty else {
unreachable!()
};
let (gv, offset) = self.get_global_location(builder.func, index);
let gv = builder.ins().global_value(self.pointer_type(), gv);
let src = builder.ins().iadd_imm(gv, i64::from(offset));
gc::gc_compiler(self)?.translate_read_gc_reference(
self,
builder,
ty,
src,
ir::MemFlags::trusted(),
)
}
pub fn translate_custom_global_set(
&mut self,
builder: &mut FunctionBuilder,
index: GlobalIndex,
value: ir::Value,
) -> WasmResult<()> {
let ty = self.module.globals[index].wasm_ty;
debug_assert!(
ty.is_vmgcref_type(),
"We only use GlobalVariable::Custom for VMGcRef types"
);
let WasmValType::Ref(ty) = ty else {
unreachable!()
};
let (gv, offset) = self.get_global_location(builder.func, index);
let gv = builder.ins().global_value(self.pointer_type(), gv);
let src = builder.ins().iadd_imm(gv, i64::from(offset));
gc::gc_compiler(self)?.translate_write_gc_reference(
self,
builder,
ty,
src,
value,
ir::MemFlags::trusted(),
)
}
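/// Creates the `HeapData` for a linear memory: the base pointer and current
/// length are located in the vmctx (directly for owned memories, through a
/// `VMMemoryDefinition` pointer for shared or imported ones), and when PCC
/// is enabled the corresponding memory types and facts are attached.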
pub fn make_heap(&mut self, func: &mut ir::Function, index: MemoryIndex) -> WasmResult<Heap> {
let pointer_type = self.pointer_type();
let memory = self.module.memories[index];
let is_shared = memory.shared;
let (ptr, base_offset, current_length_offset, ptr_memtype) = {
let vmctx = self.vmctx(func);
if let Some(def_index) = self.module.defined_memory_index(index) {
if is_shared {
let from_offset = self.offsets.vmctx_vmmemory_pointer(def_index);
let (memory, def_mt) = self.load_pointer_with_memtypes(
func,
vmctx,
from_offset,
true,
self.pcc_vmctx_memtype,
);
let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
let current_length_offset =
i32::from(self.offsets.ptr.vmmemory_definition_current_length());
(memory, base_offset, current_length_offset, def_mt)
} else {
let owned_index = self.module.owned_memory_index(def_index);
let owned_base_offset =
self.offsets.vmctx_vmmemory_definition_base(owned_index);
let owned_length_offset = self
.offsets
.vmctx_vmmemory_definition_current_length(owned_index);
let current_base_offset = i32::try_from(owned_base_offset).unwrap();
let current_length_offset = i32::try_from(owned_length_offset).unwrap();
(
vmctx,
current_base_offset,
current_length_offset,
self.pcc_vmctx_memtype,
)
}
} else {
let from_offset = self.offsets.vmctx_vmmemory_import_from(index);
let (memory, def_mt) = self.load_pointer_with_memtypes(
func,
vmctx,
from_offset,
true,
self.pcc_vmctx_memtype,
);
let base_offset = i32::from(self.offsets.ptr.vmmemory_definition_base());
let current_length_offset =
i32::from(self.offsets.ptr.vmmemory_definition_current_length());
(memory, base_offset, current_length_offset, def_mt)
}
};
let heap_bound = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(current_length_offset),
global_type: pointer_type,
flags: MemFlags::trusted(),
});
let host_page_size_log2 = self.target_config().page_size_align_log2;
let (base_fact, memory_type) = if !memory
.can_elide_bounds_check(self.tunables, host_page_size_log2)
{
if let Some(ptr_memtype) = ptr_memtype {
let data_mt = func.create_memory_type(ir::MemoryTypeData::DynamicMemory {
gv: heap_bound,
size: self.tunables.memory_guard_size,
});
let base_fact = ir::Fact::dynamic_base_ptr(data_mt);
let length_fact = ir::Fact::global_value(
u16::try_from(self.isa.pointer_type().bits()).unwrap(),
heap_bound,
);
match &mut func.memory_types[ptr_memtype] {
ir::MemoryTypeData::Struct { size, fields } => {
let base_offset = u64::try_from(base_offset).unwrap();
fields.push(ir::MemoryTypeField {
offset: base_offset,
ty: self.isa.pointer_type(),
readonly: true,
fact: Some(base_fact.clone()),
});
let current_length_offset = u64::try_from(current_length_offset).unwrap();
fields.push(ir::MemoryTypeField {
offset: current_length_offset,
ty: self.isa.pointer_type(),
readonly: true,
fact: Some(length_fact),
});
let pointer_size = u64::from(self.isa.pointer_type().bytes());
let fields_end = std::cmp::max(
base_offset + pointer_size,
current_length_offset + pointer_size,
);
*size = std::cmp::max(*size, fields_end);
}
_ => {
panic!("Bad memtype");
}
}
(Some(base_fact), Some(data_mt))
} else {
(None, None)
}
} else {
if let Some(ptr_memtype) = ptr_memtype {
let data_mt = func.create_memory_type(ir::MemoryTypeData::Memory {
size: self
.tunables
.memory_reservation
.checked_add(self.tunables.memory_guard_size)
.expect("Memory plan has overflowing size plus guard"),
});
let base_fact = Fact::Mem {
ty: data_mt,
min_offset: 0,
max_offset: 0,
nullable: false,
};
match &mut func.memory_types[ptr_memtype] {
ir::MemoryTypeData::Struct { size, fields } => {
let offset = u64::try_from(base_offset).unwrap();
fields.push(ir::MemoryTypeField {
offset,
ty: self.isa.pointer_type(),
readonly: true,
fact: Some(base_fact.clone()),
});
*size = std::cmp::max(
*size,
offset + u64::from(self.isa.pointer_type().bytes()),
);
}
_ => {
panic!("Bad memtype");
}
}
(Some(base_fact), Some(data_mt))
} else {
(None, None)
}
};
let mut flags = MemFlags::trusted().with_checked();
if !memory.memory_may_move(self.tunables) {
flags.set_readonly();
}
let heap_base = func.create_global_value(ir::GlobalValueData::Load {
base: ptr,
offset: Offset32::new(base_offset),
global_type: pointer_type,
flags,
});
func.global_value_facts[heap_base] = base_fact;
Ok(self.heaps.push(HeapData {
base: heap_base,
bound: heap_bound,
pcc_memory_type: memory_type,
memory,
}))
}
pub fn make_global(
&mut self,
func: &mut ir::Function,
index: GlobalIndex,
) -> WasmResult<GlobalVariable> {
let ty = self.module.globals[index].wasm_ty;
if ty.is_vmgcref_type() {
return Ok(GlobalVariable::Custom);
}
let (gv, offset) = self.get_global_location(func, index);
Ok(GlobalVariable::Memory {
gv,
offset: offset.into(),
ty: super::value_type(self.isa, ty),
})
}
pub fn make_indirect_sig(
&mut self,
func: &mut ir::Function,
index: TypeIndex,
) -> WasmResult<ir::SigRef> {
let interned_index = self.module.types[index].unwrap_module_type_index();
let wasm_func_ty = self.types[interned_index].unwrap_func();
let sig = crate::wasm_call_signature(self.isa, wasm_func_ty, &self.tunables);
let sig_ref = func.import_signature(sig);
self.sig_ref_to_ty[sig_ref] = Some(wasm_func_ty);
Ok(sig_ref)
}
pub fn make_direct_func(
&mut self,
func: &mut ir::Function,
index: FuncIndex,
) -> WasmResult<ir::FuncRef> {
let sig = self.module.functions[index]
.signature
.unwrap_module_type_index();
let wasm_func_ty = self.types[sig].unwrap_func();
let sig = crate::wasm_call_signature(self.isa, wasm_func_ty, &self.tunables);
let signature = func.import_signature(sig);
self.sig_ref_to_ty[signature] = Some(wasm_func_ty);
let name =
ir::ExternalName::User(func.declare_imported_user_function(ir::UserExternalName {
namespace: crate::NS_WASM_FUNC,
index: index.as_u32(),
}));
Ok(func.import_function(ir::ExtFuncData {
name,
signature,
colocated: self.module.defined_func_index(index).is_some(),
}))
}
pub fn translate_call_indirect(
&mut self,
builder: &mut FunctionBuilder,
features: &WasmFeatures,
table_index: TableIndex,
ty_index: TypeIndex,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<Option<ir::Inst>> {
Call::new(builder, self).indirect_call(
features,
table_index,
ty_index,
sig_ref,
callee,
call_args,
)
}
pub fn translate_call(
&mut self,
builder: &mut FunctionBuilder,
callee_index: FuncIndex,
callee: ir::FuncRef,
call_args: &[ir::Value],
) -> WasmResult<ir::Inst> {
Call::new(builder, self).direct_call(callee_index, callee, call_args)
}
pub fn translate_call_ref(
&mut self,
builder: &mut FunctionBuilder,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<ir::Inst> {
Call::new(builder, self).call_ref(sig_ref, callee, call_args)
}
pub fn translate_return_call(
&mut self,
builder: &mut FunctionBuilder,
callee_index: FuncIndex,
callee: ir::FuncRef,
call_args: &[ir::Value],
) -> WasmResult<()> {
Call::new_tail(builder, self).direct_call(callee_index, callee, call_args)?;
Ok(())
}
pub fn translate_return_call_indirect(
&mut self,
builder: &mut FunctionBuilder,
features: &WasmFeatures,
table_index: TableIndex,
ty_index: TypeIndex,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<()> {
Call::new_tail(builder, self).indirect_call(
features,
table_index,
ty_index,
sig_ref,
callee,
call_args,
)?;
Ok(())
}
pub fn translate_return_call_ref(
&mut self,
builder: &mut FunctionBuilder,
sig_ref: ir::SigRef,
callee: ir::Value,
call_args: &[ir::Value],
) -> WasmResult<()> {
Call::new_tail(builder, self).call_ref(sig_ref, callee, call_args)?;
Ok(())
}
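/// Translate `memory.grow` into a call to the `memory32_grow` builtin; the
/// pointer-sized result is converted back to the memory's index type.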
pub fn translate_memory_grow(
&mut self,
builder: &mut FunctionBuilder<'_>,
index: MemoryIndex,
_heap: Heap,
val: ir::Value,
) -> WasmResult<ir::Value> {
let mut pos = builder.cursor();
let memory_grow = self.builtin_functions.memory32_grow(&mut pos.func);
let index_arg = index.index();
let memory_index = pos.ins().iconst(I32, index_arg as i64);
let vmctx = self.vmctx_val(&mut pos);
let index_type = self.memory(index).idx_type;
let val = self.cast_index_to_i64(&mut pos, val, index_type);
let call_inst = pos.ins().call(memory_grow, &[vmctx, val, memory_index]);
let result = *pos.func.dfg.inst_results(call_inst).first().unwrap();
let single_byte_pages = match self.memory(index).page_size_log2 {
16 => false,
0 => true,
_ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
};
Ok(self.convert_pointer_to_index_type(
builder.cursor(),
result,
index_type,
single_byte_pages,
))
}
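/// Translate `memory.size` by loading the memory's current length in bytes
/// from its `VMMemoryDefinition` (atomically for shared memories) and
/// shifting it down to a page count.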
pub fn translate_memory_size(
&mut self,
mut pos: FuncCursor<'_>,
index: MemoryIndex,
_heap: Heap,
) -> WasmResult<ir::Value> {
let pointer_type = self.pointer_type();
let vmctx = self.vmctx(&mut pos.func);
let is_shared = self.module.memories[index].shared;
let base = pos.ins().global_value(pointer_type, vmctx);
let current_length_in_bytes = match self.module.defined_memory_index(index) {
Some(def_index) => {
if is_shared {
let offset =
i32::try_from(self.offsets.vmctx_vmmemory_pointer(def_index)).unwrap();
let vmmemory_ptr =
pos.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset);
let vmmemory_definition_offset =
i64::from(self.offsets.ptr.vmmemory_definition_current_length());
let vmmemory_definition_ptr =
pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
pos.ins().atomic_load(
pointer_type,
ir::MemFlags::trusted(),
vmmemory_definition_ptr,
)
} else {
let owned_index = self.module.owned_memory_index(def_index);
let offset = i32::try_from(
self.offsets
.vmctx_vmmemory_definition_current_length(owned_index),
)
.unwrap();
pos.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset)
}
}
None => {
let offset = i32::try_from(self.offsets.vmctx_vmmemory_import_from(index)).unwrap();
let vmmemory_ptr =
pos.ins()
.load(pointer_type, ir::MemFlags::trusted(), base, offset);
if is_shared {
let vmmemory_definition_offset =
i64::from(self.offsets.ptr.vmmemory_definition_current_length());
let vmmemory_definition_ptr =
pos.ins().iadd_imm(vmmemory_ptr, vmmemory_definition_offset);
pos.ins().atomic_load(
pointer_type,
ir::MemFlags::trusted(),
vmmemory_definition_ptr,
)
} else {
pos.ins().load(
pointer_type,
ir::MemFlags::trusted(),
vmmemory_ptr,
i32::from(self.offsets.ptr.vmmemory_definition_current_length()),
)
}
}
};
let page_size_log2 = i64::from(self.module.memories[index].page_size_log2);
let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2);
let single_byte_pages = match page_size_log2 {
16 => false,
0 => true,
_ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"),
};
Ok(self.convert_pointer_to_index_type(
pos,
current_length_in_pages,
self.memory(index).idx_type,
single_byte_pages,
))
}
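/// Translate `memory.copy` into a call to the `memory_copy` builtin, widening
/// 32-bit indices to 64 bits first.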
pub fn translate_memory_copy(
&mut self,
builder: &mut FunctionBuilder<'_>,
src_index: MemoryIndex,
_src_heap: Heap,
dst_index: MemoryIndex,
_dst_heap: Heap,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let vmctx = self.vmctx_val(&mut pos);
let memory_copy = self.builtin_functions.memory_copy(&mut pos.func);
let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(dst_index).idx_type);
let src = self.cast_index_to_i64(&mut pos, src, self.memory(src_index).idx_type);
let len = if index_type_to_ir_type(self.memory(dst_index).idx_type) == I64
&& index_type_to_ir_type(self.memory(src_index).idx_type) == I64
{
len
} else {
pos.ins().uextend(I64, len)
};
let src_index = pos.ins().iconst(I32, i64::from(src_index.as_u32()));
let dst_index = pos.ins().iconst(I32, i64::from(dst_index.as_u32()));
pos.ins()
.call(memory_copy, &[vmctx, dst_index, dst, src_index, src, len]);
Ok(())
}
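/// Translate `memory.fill` into a call to the `memory_fill` builtin.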
pub fn translate_memory_fill(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
_heap: Heap,
dst: ir::Value,
val: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let memory_fill = self.builtin_functions.memory_fill(&mut pos.func);
let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);
let len = self.cast_index_to_i64(&mut pos, len, self.memory(memory_index).idx_type);
let memory_index_arg = pos.ins().iconst(I32, i64::from(memory_index.as_u32()));
let vmctx = self.vmctx_val(&mut pos);
pos.ins()
.call(memory_fill, &[vmctx, memory_index_arg, dst, val, len]);
Ok(())
}
pub fn translate_memory_init(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
_heap: Heap,
seg_index: u32,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let memory_init = self.builtin_functions.memory_init(&mut pos.func);
let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
let vmctx = self.vmctx_val(&mut pos);
let dst = self.cast_index_to_i64(&mut pos, dst, self.memory(memory_index).idx_type);
pos.ins().call(
memory_init,
&[vmctx, memory_index_arg, seg_index_arg, dst, src, len],
);
Ok(())
}
pub fn translate_data_drop(&mut self, mut pos: FuncCursor, seg_index: u32) -> WasmResult<()> {
let data_drop = self.builtin_functions.data_drop(&mut pos.func);
let seg_index_arg = pos.ins().iconst(I32, seg_index as i64);
let vmctx = self.vmctx_val(&mut pos);
pos.ins().call(data_drop, &[vmctx, seg_index_arg]);
Ok(())
}
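/// Translate `table.size` by reading the table's cached bound from its
/// `TableData`.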
pub fn translate_table_size(
&mut self,
pos: FuncCursor,
table_index: TableIndex,
) -> WasmResult<ir::Value> {
self.ensure_table_exists(pos.func, table_index);
let table_data = self.tables[table_index].as_ref().unwrap();
let index_type = index_type_to_ir_type(self.table(table_index).idx_type);
Ok(table_data.bound.bound(&*self.isa, pos, index_type))
}
pub fn translate_table_copy(
&mut self,
builder: &mut FunctionBuilder<'_>,
dst_table_index: TableIndex,
src_table_index: TableIndex,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let (table_copy, dst_table_index_arg, src_table_index_arg) =
self.get_table_copy_func(&mut builder.func, dst_table_index, src_table_index);
let mut pos = builder.cursor();
let dst = self.cast_index_to_i64(&mut pos, dst, self.table(dst_table_index).idx_type);
let src = self.cast_index_to_i64(&mut pos, src, self.table(src_table_index).idx_type);
let len = if index_type_to_ir_type(self.table(dst_table_index).idx_type) == I64
&& index_type_to_ir_type(self.table(src_table_index).idx_type) == I64
{
len
} else {
pos.ins().uextend(I64, len)
};
let dst_table_index_arg = pos.ins().iconst(I32, dst_table_index_arg as i64);
let src_table_index_arg = pos.ins().iconst(I32, src_table_index_arg as i64);
let vmctx = self.vmctx_val(&mut pos);
pos.ins().call(
table_copy,
&[
vmctx,
dst_table_index_arg,
src_table_index_arg,
dst,
src,
len,
],
);
Ok(())
}
pub fn translate_table_init(
&mut self,
builder: &mut FunctionBuilder<'_>,
seg_index: u32,
table_index: TableIndex,
dst: ir::Value,
src: ir::Value,
len: ir::Value,
) -> WasmResult<()> {
let mut pos = builder.cursor();
let table_init = self.builtin_functions.table_init(&mut pos.func);
let table_index_arg = pos.ins().iconst(I32, i64::from(table_index.as_u32()));
let seg_index_arg = pos.ins().iconst(I32, i64::from(seg_index));
let vmctx = self.vmctx_val(&mut pos);
let index_type = self.table(table_index).idx_type;
let dst = self.cast_index_to_i64(&mut pos, dst, index_type);
let src = pos.ins().uextend(I64, src);
let len = pos.ins().uextend(I64, len);
pos.ins().call(
table_init,
&[vmctx, table_index_arg, seg_index_arg, dst, src, len],
);
Ok(())
}
pub fn translate_elem_drop(&mut self, mut pos: FuncCursor, elem_index: u32) -> WasmResult<()> {
let elem_drop = self.builtin_functions.elem_drop(&mut pos.func);
let elem_index_arg = pos.ins().iconst(I32, elem_index as i64);
let vmctx = self.vmctx_val(&mut pos);
pos.ins().call(elem_drop, &[vmctx, elem_index_arg]);
Ok(())
}
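/// Translate `memory.atomic.wait32`/`wait64` into a call to the matching
/// builtin, chosen from the type of `expected`; reports a compile-time error
/// when the `threads` feature was disabled at build time.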
pub fn translate_atomic_wait(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
_heap: Heap,
addr: ir::Value,
expected: ir::Value,
timeout: ir::Value,
) -> WasmResult<ir::Value> {
#[cfg(feature = "threads")]
{
let mut pos = builder.cursor();
let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
let implied_ty = pos.func.dfg.value_type(expected);
let (wait_func, memory_index) =
self.get_memory_atomic_wait(&mut pos.func, memory_index, implied_ty);
let memory_index_arg = pos.ins().iconst(I32, memory_index as i64);
let vmctx = self.vmctx_val(&mut pos);
let call_inst = pos.ins().call(
wait_func,
&[vmctx, memory_index_arg, addr, expected, timeout],
);
let ret = pos.func.dfg.inst_results(call_inst)[0];
Ok(builder.ins().ireduce(ir::types::I32, ret))
}
#[cfg(not(feature = "threads"))]
{
let _ = (builder, memory_index, addr, expected, timeout);
Err(wasmtime_environ::WasmError::Unsupported(
"threads support disabled at compile time".to_string(),
))
}
}
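/// Translate `memory.atomic.notify` into a call to the
/// `memory_atomic_notify` builtin.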
pub fn translate_atomic_notify(
&mut self,
builder: &mut FunctionBuilder<'_>,
memory_index: MemoryIndex,
_heap: Heap,
addr: ir::Value,
count: ir::Value,
) -> WasmResult<ir::Value> {
#[cfg(feature = "threads")]
{
let mut pos = builder.cursor();
let addr = self.cast_index_to_i64(&mut pos, addr, self.memory(memory_index).idx_type);
let atomic_notify = self.builtin_functions.memory_atomic_notify(&mut pos.func);
let memory_index_arg = pos.ins().iconst(I32, memory_index.index() as i64);
let vmctx = self.vmctx_val(&mut pos);
let call_inst = pos
.ins()
.call(atomic_notify, &[vmctx, memory_index_arg, addr, count]);
let ret = pos.func.dfg.inst_results(call_inst)[0];
Ok(builder.ins().ireduce(ir::types::I32, ret))
}
#[cfg(not(feature = "threads"))]
{
let _ = (builder, memory_index, addr, count);
Err(wasmtime_environ::WasmError::Unsupported(
"threads support disabled at compile time".to_string(),
))
}
}
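/// Emitted at the top of every loop: inserts fuel and/or epoch checks so that
/// long-running loops can be interrupted.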
pub fn translate_loop_header(&mut self, builder: &mut FunctionBuilder) -> WasmResult<()> {
if self.tunables.consume_fuel {
self.fuel_check(builder);
}
if self.tunables.epoch_interruption {
self.epoch_check(builder);
}
Ok(())
}
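/// Called before each Wasm operator is translated; charges fuel for the
/// operator when fuel consumption is enabled.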
pub fn before_translate_operator(
&mut self,
op: &Operator,
builder: &mut FunctionBuilder,
state: &FuncTranslationState,
) -> WasmResult<()> {
if self.tunables.consume_fuel {
self.fuel_before_op(op, builder, state.reachable());
}
Ok(())
}
pub fn after_translate_operator(
&mut self,
op: &Operator,
builder: &mut FunctionBuilder,
state: &FuncTranslationState,
) -> WasmResult<()> {
if self.tunables.consume_fuel && state.reachable() {
self.fuel_after_op(op, builder);
}
Ok(())
}
pub fn before_unconditionally_trapping_memory_access(
&mut self,
builder: &mut FunctionBuilder,
) -> WasmResult<()> {
if self.tunables.consume_fuel {
self.fuel_increment_var(builder);
self.fuel_save_from_var(builder);
}
Ok(())
}
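/// Per-function prologue: emit an explicit stack-limit check if one was
/// requested, set up fuel/epoch bookkeeping, and install any wmemcheck entry
/// hooks for `malloc`/`free`.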
pub fn before_translate_function(
&mut self,
builder: &mut FunctionBuilder,
_state: &FuncTranslationState,
) -> WasmResult<()> {
if let Some(gv) = self.stack_limit_at_function_entry {
let limit = builder.ins().global_value(self.pointer_type(), gv);
let sp = builder.ins().get_stack_pointer(self.pointer_type());
let overflow = builder.ins().icmp(IntCC::UnsignedLessThan, sp, limit);
self.conditionally_trap(builder, overflow, ir::TrapCode::STACK_OVERFLOW);
}
if self.tunables.consume_fuel || self.tunables.epoch_interruption {
self.declare_vmstore_context_ptr(builder);
}
if self.tunables.consume_fuel {
self.fuel_function_entry(builder);
}
if self.tunables.epoch_interruption {
self.epoch_function_entry(builder);
}
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let func_name = self.current_func_name(builder);
if func_name == Some("malloc") {
self.check_malloc_start(builder);
} else if func_name == Some("free") {
self.check_free_start(builder);
}
}
Ok(())
}
pub fn after_translate_function(
&mut self,
builder: &mut FunctionBuilder,
state: &FuncTranslationState,
) -> WasmResult<()> {
if self.tunables.consume_fuel && state.reachable() {
self.fuel_function_exit(builder);
}
Ok(())
}
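/// Whether relaxed-SIMD instructions are forced to use their deterministic
/// lowerings.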
pub fn relaxed_simd_deterministic(&self) -> bool {
self.tunables.relaxed_simd_deterministic
}
pub fn has_native_fma(&self) -> bool {
self.isa.has_native_fma()
}
pub fn is_x86(&self) -> bool {
self.isa.triple().architecture == target_lexicon::Architecture::X86_64
}
pub fn use_x86_blendv_for_relaxed_laneselect(&self, ty: Type) -> bool {
self.isa.has_x86_blendv_lowering(ty)
}
pub fn use_x86_pshufb_for_relaxed_swizzle(&self) -> bool {
self.isa.has_x86_pshufb_lowering()
}
pub fn use_x86_pmulhrsw_for_relaxed_q15mul(&self) -> bool {
self.isa.has_x86_pmulhrsw_lowering()
}
pub fn use_x86_pmaddubsw_for_dot(&self) -> bool {
self.isa.has_x86_pmaddubsw_lowering()
}
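/// Called just before a return is emitted; used by wmemcheck to hook
/// `malloc`/`free` exits.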
pub fn handle_before_return(&mut self, retvals: &[ir::Value], builder: &mut FunctionBuilder) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let func_name = self.current_func_name(builder);
if func_name == Some("malloc") {
self.hook_malloc_exit(builder, retvals);
} else if func_name == Some("free") {
self.hook_free_exit(builder);
}
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (retvals, builder);
}
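/// wmemcheck hook emitted before each load to validate the accessed range.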
pub fn before_load(
&mut self,
builder: &mut FunctionBuilder,
val_size: u8,
addr: ir::Value,
offset: u64,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let check_load = self.builtin_functions.check_load(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let num_bytes = builder.ins().iconst(I32, val_size as i64);
let offset_val = builder.ins().iconst(I64, offset as i64);
builder
.ins()
.call(check_load, &[vmctx, num_bytes, addr, offset_val]);
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, val_size, addr, offset);
}
pub fn before_store(
&mut self,
builder: &mut FunctionBuilder,
val_size: u8,
addr: ir::Value,
offset: u64,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
let check_store = self.builtin_functions.check_store(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let num_bytes = builder.ins().iconst(I32, val_size as i64);
let offset_val = builder.ins().iconst(I64, offset as i64);
builder
.ins()
.call(check_store, &[vmctx, num_bytes, addr, offset_val]);
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, val_size, addr, offset);
}
pub fn update_global(
&mut self,
builder: &mut FunctionBuilder,
global_index: u32,
value: ir::Value,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck {
if global_index == 0 {
let update_stack_pointer =
self.builtin_functions.update_stack_pointer(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(update_stack_pointer, &[vmctx, value]);
}
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, global_index, value);
}
pub fn before_memory_grow(
&mut self,
builder: &mut FunctionBuilder,
num_pages: ir::Value,
mem_index: MemoryIndex,
) {
#[cfg(feature = "wmemcheck")]
if self.compiler.wmemcheck && mem_index.as_u32() == 0 {
let update_mem_size = self.builtin_functions.update_mem_size(builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
builder.ins().call(update_mem_size, &[vmctx, num_pages]);
}
#[cfg(not(feature = "wmemcheck"))]
let _ = (builder, num_pages, mem_index);
}
pub fn isa(&self) -> &dyn TargetIsa {
&*self.isa
}
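/// Emit an unconditional trap. When CLIF trap instructions cannot be used,
/// the trap is routed through the `trap` and `raise` builtins instead, with a
/// final unreachable internal-assert trap.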
pub fn trap(&mut self, builder: &mut FunctionBuilder, trap: ir::TrapCode) {
match (
self.clif_instruction_traps_enabled(),
crate::clif_trap_to_env_trap(trap),
) {
(true, _) | (_, None) => {
builder.ins().trap(trap);
}
(false, Some(trap)) => {
let libcall = self.builtin_functions.trap(&mut builder.func);
let vmctx = self.vmctx_val(&mut builder.cursor());
let trap_code = builder.ins().iconst(I8, i64::from(trap as u8));
builder.ins().call(libcall, &[vmctx, trap_code]);
let raise = self.builtin_functions.raise(&mut builder.func);
builder.ins().call(raise, &[vmctx]);
builder.ins().trap(TRAP_INTERNAL_ASSERT);
}
}
}
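/// Trap when `value` is zero. Falls back to an explicit compare plus
/// `conditionally_trap` when native CLIF trap instructions cannot be used;
/// `trapnz` below is the inverted counterpart.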
pub fn trapz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
if self.clif_instruction_traps_enabled() {
builder.ins().trapz(value, trap);
} else {
let ty = builder.func.dfg.value_type(value);
let zero = builder.ins().iconst(ty, 0);
let cmp = builder.ins().icmp(IntCC::Equal, value, zero);
self.conditionally_trap(builder, cmp, trap);
}
}
pub fn trapnz(&mut self, builder: &mut FunctionBuilder, value: ir::Value, trap: ir::TrapCode) {
if self.clif_instruction_traps_enabled() {
builder.ins().trapnz(value, trap);
} else {
let ty = builder.func.dfg.value_type(value);
let zero = builder.ins().iconst(ty, 0);
let cmp = builder.ins().icmp(IntCC::NotEqual, value, zero);
self.conditionally_trap(builder, cmp, trap);
}
}
pub fn uadd_overflow_trap(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
trap: ir::TrapCode,
) -> ir::Value {
if self.clif_instruction_traps_enabled() {
builder.ins().uadd_overflow_trap(lhs, rhs, trap)
} else {
let (ret, overflow) = builder.ins().uadd_overflow(lhs, rhs);
self.conditionally_trap(builder, overflow, trap);
ret
}
}
pub fn translate_sdiv(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_signed_divide(builder, lhs, rhs);
builder.ins().sdiv(lhs, rhs)
}
pub fn translate_udiv(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_zero_divisor(builder, rhs);
builder.ins().udiv(lhs, rhs)
}
pub fn translate_srem(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_zero_divisor(builder, rhs);
builder.ins().srem(lhs, rhs)
}
pub fn translate_urem(
&mut self,
builder: &mut FunctionBuilder,
lhs: ir::Value,
rhs: ir::Value,
) -> ir::Value {
self.guard_zero_divisor(builder, rhs);
builder.ins().urem(lhs, rhs)
}
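/// Translate a trapping float-to-signed-integer conversion. When CLIF
/// instruction traps are unavailable, explicit NaN and range guards are
/// emitted first; the two bound pairs cover 32- and 64-bit destinations.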
pub fn translate_fcvt_to_sint(
&mut self,
builder: &mut FunctionBuilder,
ty: ir::Type,
val: ir::Value,
) -> ir::Value {
if !self.clif_instruction_traps_enabled() {
self.guard_fcvt_to_int(
builder,
ty,
val,
(-2147483649.0, 2147483648.0),
(-9223372036854777856.0, 9223372036854775808.0),
);
}
builder.ins().fcvt_to_sint(ty, val)
}
pub fn translate_fcvt_to_uint(
&mut self,
builder: &mut FunctionBuilder,
ty: ir::Type,
val: ir::Value,
) -> ir::Value {
if !self.clif_instruction_traps_enabled() {
self.guard_fcvt_to_int(
builder,
ty,
val,
(-1.0, 4294967296.0),
(-1.0, 18446744073709551616.0),
);
}
builder.ins().fcvt_to_uint(ty, val)
}
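/// Whether memory accesses may rely on CLIF trapping loads/stores
/// (signal-based traps) rather than explicit bounds checks; Pulley never uses
/// signal-based memory traps.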
pub fn clif_memory_traps_enabled(&self) -> bool {
self.tunables.signals_based_traps && !self.is_pulley()
}
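/// Whether CLIF trapping instructions (e.g. `trapz`, `uadd_overflow_trap`)
/// may be emitted directly; Pulley interprets these itself, so they are
/// always available there.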
pub fn clif_instruction_traps_enabled(&self) -> bool {
self.tunables.signals_based_traps || self.is_pulley()
}
pub fn load_from_zero_allowed(&self) -> bool {
self.is_pulley()
|| (self.clif_memory_traps_enabled() && self.heap_access_spectre_mitigation())
}
pub fn is_pulley(&self) -> bool {
self.isa.triple().is_pulley()
}
}
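/// Convert a Wasm index type (i32 or i64) to the corresponding Cranelift type.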
fn index_type_to_ir_type(index_type: IndexType) -> ir::Type {
match index_type {
IndexType::I32 => I32,
IndexType::I64 => I64,
}
}