Struct pulley_interpreter::disas::Disassembler

Source
pub struct Disassembler<'a> { /* private fields */ }

A Pulley bytecode disassembler.

This is implemented as an OpVisitor, where you pass a Disassembler to a Decoder in order to disassemble instructions from a bytecode stream.

Alternatively, you can use the Disassembler::disassemble_all method to disassemble a complete bytecode stream.
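For example, a one-shot disassembly might look like the following. This is a minimal sketch: the empty bytecode buffer is a placeholder, and a real stream would come from a Pulley compiler backend.

use pulley_interpreter::disas::Disassembler;

fn main() {
    // Placeholder bytecode; substitute a real Pulley bytecode stream.
    let bytecode: &[u8] = &[];

    // Disassemble the complete stream in one call.
    let text = Disassembler::disassemble_all(bytecode)
        .expect("failed to decode bytecode");
    println!("{text}");
}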

Implementations§

Source§

impl<'a> Disassembler<'a>

Source

pub fn disassemble_all(bytecode: &'a [u8]) -> Result<String>

Disassemble every instruction in the given bytecode stream.

Source

pub fn new(bytecode: &'a [u8]) -> Self

Create a new Disassembler that can be used to incrementally disassemble instructions from the given bytecode stream.

Source

pub fn offsets(&mut self, offsets: bool) -> &mut Self

Whether to prefix each instruction’s disassembly with its offset.

True by default.

Source

pub fn hexdump(&mut self, hexdump: bool) -> &mut Self

Whether to include a hexdump of the bytecode in the disassembly.

True by default.

Source

pub fn br_tables(&mut self, enable: bool) -> &mut Self

Whether to include branch tables in the disassembly.

True by default.

Source

pub fn start_offset(&mut self, offset: usize) -> &mut Self

Configures the offset that this function starts from, if it doesn’t start from 0.

This can be useful when disassembling a single function at a time.

Source

pub fn disas(&self) -> &str

Get the disassembly thus far.
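Putting the builder methods together, a sketch of incremental use follows. The Decoder::new and decode_one calls are assumptions based on the OpVisitor workflow described above; consult the decode module for the exact driver API.

use pulley_interpreter::decode::Decoder;
use pulley_interpreter::disas::Disassembler;

// Disassemble at most `n` instructions from `bytecode`.
fn disassemble_prefix(bytecode: &[u8], n: usize) -> String {
    let mut disas = Disassembler::new(bytecode);
    disas
        .offsets(true)       // prefix each line with its offset (the default)
        .hexdump(false)      // omit the raw-byte hexdump
        .start_offset(0x40); // illustrative: this function begins at offset 0x40

    // Assumed driver loop: decode one instruction at a time, stopping at the
    // end of the stream or on a decoding error.
    let mut decoder = Decoder::new();
    for _ in 0..n {
        if decoder.decode_one(&mut disas).is_err() {
            break;
        }
    }

    disas.disas().to_string()
}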

Trait Implementations§

Source§

impl ExtendedOpVisitor for Disassembler<'_>

Source§

fn trap(&mut self)

Raise a trap.
Source§

fn nop(&mut self)

Do nothing.
Source§

fn call_indirect_host(&mut self, id: u8)

A special opcode to halt interpreter execution and yield control back to the host. Read more
Source§

fn xmov_fp(&mut self, dst: XReg)

Gets the special “fp” register and moves it into dst.
Source§

fn xmov_lr(&mut self, dst: XReg)

Gets the special “lr” register and moves it into dst.
Source§

fn bswap32(&mut self, dst: XReg, src: XReg)

dst = byteswap(low32(src))
Source§

fn bswap64(&mut self, dst: XReg, src: XReg)

dst = byteswap(src)
Source§

fn xadd32_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>)

32-bit checked unsigned addition: low32(dst) = low32(src1) + low32(src2). Read more
Source§

fn xadd64_uoverflow_trap(&mut self, operands: BinaryOperands<XReg>)

64-bit checked unsigned addition: dst = src1 + src2.
Source§

fn xmulhi64_s(&mut self, operands: BinaryOperands<XReg>)

dst = high64(src1 * src2) (signed)
Source§

fn xmulhi64_u(&mut self, operands: BinaryOperands<XReg>)

dst = high64(src1 * src2) (unsigned)
Source§

fn xbmask32(&mut self, dst: XReg, src: XReg)

low32(dst) = if low32(src) == 0 { 0 } else { -1 }
Source§

fn xbmask64(&mut self, dst: XReg, src: XReg)

dst = if src == 0 { 0 } else { -1 }
Source§

fn xload16be_u32_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = zext(*addr)
Source§

fn xload16be_s32_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = sext(*addr)
Source§

fn xload32be_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = zext(*addr)
Source§

fn xload64be_o32(&mut self, dst: XReg, addr: AddrO32)

dst = *addr
Source§

fn xstore16be_o32(&mut self, addr: AddrO32, src: XReg)

*addr = low16(src)
Source§

fn xstore32be_o32(&mut self, addr: AddrO32, src: XReg)

*addr = low32(src)
Source§

fn xstore64be_o32(&mut self, addr: AddrO32, src: XReg)

*addr = low64(src)
Source§

fn fload32be_o32(&mut self, dst: FReg, addr: AddrO32)

low32(dst) = zext(*addr)
Source§

fn fload64be_o32(&mut self, dst: FReg, addr: AddrO32)

dst = *addr
Source§

fn fstore32be_o32(&mut self, addr: AddrO32, src: FReg)

*addr = low32(src)
Source§

fn fstore64be_o32(&mut self, addr: AddrO32, src: FReg)

*addr = src
Source§

fn fload32le_o32(&mut self, dst: FReg, addr: AddrO32)

low32(dst) = zext(*addr)
Source§

fn fload64le_o32(&mut self, dst: FReg, addr: AddrO32)

dst = *addr
Source§

fn fstore32le_o32(&mut self, addr: AddrO32, src: FReg)

*addr = low32(src)
Source§

fn fstore64le_o32(&mut self, addr: AddrO32, src: FReg)

*addr = src
Source§

fn fload32le_z(&mut self, dst: FReg, addr: AddrZ)

low32(dst) = zext(*addr)
Source§

fn fload64le_z(&mut self, dst: FReg, addr: AddrZ)

dst = *addr
Source§

fn fstore32le_z(&mut self, addr: AddrZ, src: FReg)

*addr = low32(src)
Source§

fn fstore64le_z(&mut self, addr: AddrZ, src: FReg)

*addr = src
Source§

fn fload32le_g32(&mut self, dst: FReg, addr: AddrG32)

low32(dst) = zext(*addr)
Source§

fn fload64le_g32(&mut self, dst: FReg, addr: AddrG32)

dst = *addr
Source§

fn fstore32le_g32(&mut self, addr: AddrG32, src: FReg)

*addr = low32(src)
Source§

fn fstore64le_g32(&mut self, addr: AddrG32, src: FReg)

*addr = src
Source§

fn vload128le_o32(&mut self, dst: VReg, addr: AddrO32)

dst = *addr
Source§

fn vstore128le_o32(&mut self, addr: AddrO32, src: VReg)

*addr = src
Source§

fn vload128le_z(&mut self, dst: VReg, addr: AddrZ)

dst = *(ptr + offset)
Source§

fn vstore128le_z(&mut self, addr: AddrZ, src: VReg)

*(ptr + offset) = src
Source§

fn vload128le_g32(&mut self, dst: VReg, addr: AddrG32)

dst = *(ptr + offset)
Source§

fn vstore128le_g32(&mut self, addr: AddrG32, src: VReg)

*(ptr + offset) = src
Source§

fn fmov(&mut self, dst: FReg, src: FReg)

Move between f registers.
Source§

fn vmov(&mut self, dst: VReg, src: VReg)

Move between v registers.
Source§

fn bitcast_int_from_float_32(&mut self, dst: XReg, src: FReg)

low32(dst) = bitcast low32(src) as i32
Source§

fn bitcast_int_from_float_64(&mut self, dst: XReg, src: FReg)

dst = bitcast src as i64
Source§

fn bitcast_float_from_int_32(&mut self, dst: FReg, src: XReg)

low32(dst) = bitcast low32(src) as f32
Source§

fn bitcast_float_from_int_64(&mut self, dst: FReg, src: XReg)

dst = bitcast src as f64
Source§

fn fconst32(&mut self, dst: FReg, bits: u32)

low32(dst) = bits
Source§

fn fconst64(&mut self, dst: FReg, bits: u64)

dst = bits
Source§

fn feq32(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 == src2)
Source§

fn fneq32(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 != src2)
Source§

fn flt32(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 < src2)
Source§

fn flteq32(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 <= src2)
Source§

fn feq64(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 == src2)
Source§

fn fneq64(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 != src2)
Source§

fn flt64(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 < src2)
Source§

fn flteq64(&mut self, dst: XReg, src1: FReg, src2: FReg)

low32(dst) = zext(src1 <= src2)
Source§

fn fselect32(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg)

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
Source§

fn fselect64(&mut self, dst: FReg, cond: XReg, if_nonzero: FReg, if_zero: FReg)

dst = low32(cond) ? if_nonzero : if_zero
Source§

fn f32_from_f64(&mut self, dst: FReg, src: FReg)

low32(dst) = demote(src)
Source§

fn f64_from_f32(&mut self, dst: FReg, src: FReg)

dst = promote(low32(src))
Source§

fn f32_from_x32_s(&mut self, dst: FReg, src: XReg)

low32(dst) = checked_f32_from_signed(low32(src))
Source§

fn f32_from_x32_u(&mut self, dst: FReg, src: XReg)

low32(dst) = checked_f32_from_unsigned(low32(src))
Source§

fn f32_from_x64_s(&mut self, dst: FReg, src: XReg)

low32(dst) = checked_f32_from_signed(src)
Source§

fn f32_from_x64_u(&mut self, dst: FReg, src: XReg)

low32(dst) = checked_f32_from_unsigned(src)
Source§

fn f64_from_x32_s(&mut self, dst: FReg, src: XReg)

dst = checked_f64_from_signed(low32(src))
Source§

fn f64_from_x32_u(&mut self, dst: FReg, src: XReg)

dst = checked_f64_from_unsigned(low32(src))
Source§

fn f64_from_x64_s(&mut self, dst: FReg, src: XReg)

dst = checked_f64_from_signed(src)
Source§

fn f64_from_x64_u(&mut self, dst: FReg, src: XReg)

dst = checked_f64_from_unsigned(src)
Source§

fn x32_from_f32_s(&mut self, dst: XReg, src: FReg)

low32(dst) = checked_signed_from_f32(low32(src))
Source§

fn x32_from_f32_u(&mut self, dst: XReg, src: FReg)

low32(dst) = checked_unsigned_from_f32(low32(src))
Source§

fn x32_from_f64_s(&mut self, dst: XReg, src: FReg)

low32(dst) = checked_signed_from_f64(src)
Source§

fn x32_from_f64_u(&mut self, dst: XReg, src: FReg)

low32(dst) = checked_unsigned_from_f64(src)
Source§

fn x64_from_f32_s(&mut self, dst: XReg, src: FReg)

dst = checked_signed_from_f32(low32(src))
Source§

fn x64_from_f32_u(&mut self, dst: XReg, src: FReg)

dst = checked_unsigned_from_f32(low32(src))
Source§

fn x64_from_f64_s(&mut self, dst: XReg, src: FReg)

dst = checked_signed_from_f64(src)
Source§

fn x64_from_f64_u(&mut self, dst: XReg, src: FReg)

dst = checked_unsigned_from_f64(src)
Source§

fn x32_from_f32_s_sat(&mut self, dst: XReg, src: FReg)

low32(dst) = saturating_signed_from_f32(low32(src))
Source§

fn x32_from_f32_u_sat(&mut self, dst: XReg, src: FReg)

low32(dst) = saturating_unsigned_from_f32(low32(src))
Source§

fn x32_from_f64_s_sat(&mut self, dst: XReg, src: FReg)

low32(dst) = saturating_signed_from_f64(src)
Source§

fn x32_from_f64_u_sat(&mut self, dst: XReg, src: FReg)

low32(dst) = saturating_unsigned_from_f64(src)
Source§

fn x64_from_f32_s_sat(&mut self, dst: XReg, src: FReg)

dst = saturating_signed_from_f32(low32(src))
Source§

fn x64_from_f32_u_sat(&mut self, dst: XReg, src: FReg)

dst = saturating_unsigned_from_f32(low32(src))
Source§

fn x64_from_f64_s_sat(&mut self, dst: XReg, src: FReg)

dst = saturating_signed_from_f64(src)
Source§

fn x64_from_f64_u_sat(&mut self, dst: XReg, src: FReg)

dst = saturating_unsigned_from_f64(src)
Source§

fn fcopysign32(&mut self, operands: BinaryOperands<FReg>)

low32(dst) = copysign(low32(src1), low32(src2))
Source§

fn fcopysign64(&mut self, operands: BinaryOperands<FReg>)

dst = copysign(src1, src2)
Source§

fn fadd32(&mut self, operands: BinaryOperands<FReg>)

low32(dst) = low32(src1) + low32(src2)
Source§

fn fsub32(&mut self, operands: BinaryOperands<FReg>)

low32(dst) = low32(src1) - low32(src2)
Source§

fn vsubf32x4(&mut self, operands: BinaryOperands<VReg>)

low128(dst) = low128(src1) - low128(src2)
Source§

fn fmul32(&mut self, operands: BinaryOperands<FReg>)

low32(dst) = low32(src1) * low32(src2)
Source§

fn vmulf32x4(&mut self, operands: BinaryOperands<VReg>)

low128(dst) = low128(src1) * low128(src2)
Source§

fn fdiv32(&mut self, operands: BinaryOperands<FReg>)

low32(dst) = low32(src1) / low32(src2)
Source§

fn vdivf32x4(&mut self, operands: BinaryOperands<VReg>)

low128(dst) = low128(src1) / low128(src2)
Source§

fn fmaximum32(&mut self, operands: BinaryOperands<FReg>)

low32(dst) = ieee_maximum(low32(src1), low32(src2))
Source§

fn fminimum32(&mut self, operands: BinaryOperands<FReg>)

low32(dst) = ieee_minimum(low32(src1), low32(src2))
Source§

fn ftrunc32(&mut self, dst: FReg, src: FReg)

low32(dst) = ieee_trunc(low32(src))
Source§

fn vtrunc32x4(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_trunc(low128(src))
Source§

fn vtrunc64x2(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_trunc(low128(src))
Source§

fn ffloor32(&mut self, dst: FReg, src: FReg)

low32(dst) = ieee_floor(low32(src))
Source§

fn vfloor32x4(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_floor(low128(src))
Source§

fn vfloor64x2(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_floor(low128(src))
Source§

fn fceil32(&mut self, dst: FReg, src: FReg)

low32(dst) = ieee_ceil(low32(src))
Source§

fn vceil32x4(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_ceil(low128(src))
Source§

fn vceil64x2(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_ceil(low128(src))
Source§

fn fnearest32(&mut self, dst: FReg, src: FReg)

low32(dst) = ieee_nearest(low32(src))
Source§

fn fsqrt32(&mut self, dst: FReg, src: FReg)

low32(dst) = ieee_sqrt(low32(src))
Source§

fn vsqrt32x4(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_sqrt(low128(src))
Source§

fn vsqrt64x2(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_sqrt(low128(src))
Source§

fn fneg32(&mut self, dst: FReg, src: FReg)

low32(dst) = -low32(src)
Source§

fn vnegf32x4(&mut self, dst: VReg, src: VReg)

low128(dst) = -low128(src)
Source§

fn fabs32(&mut self, dst: FReg, src: FReg)

low32(dst) = |low32(src)|
Source§

fn fadd64(&mut self, operands: BinaryOperands<FReg>)

dst = src1 + src2
Source§

fn fsub64(&mut self, operands: BinaryOperands<FReg>)

dst = src1 - src2
Source§

fn fmul64(&mut self, operands: BinaryOperands<FReg>)

dst = src1 * src2
Source§

fn fdiv64(&mut self, operands: BinaryOperands<FReg>)

dst = src1 / src2
Source§

fn vdivf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src1 / src2
Source§

fn fmaximum64(&mut self, operands: BinaryOperands<FReg>)

dst = ieee_maximum(src1, src2)
Source§

fn fminimum64(&mut self, operands: BinaryOperands<FReg>)

dst = ieee_minimum(src1, src2)
Source§

fn ftrunc64(&mut self, dst: FReg, src: FReg)

dst = ieee_trunc(src)
Source§

fn ffloor64(&mut self, dst: FReg, src: FReg)

dst = ieee_floor(src)
Source§

fn fceil64(&mut self, dst: FReg, src: FReg)

dst = ieee_ceil(src)
Source§

fn fnearest64(&mut self, dst: FReg, src: FReg)

dst = ieee_nearest(src)
Source§

fn vnearest32x4(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_nearest(low128(src))
Source§

fn vnearest64x2(&mut self, dst: VReg, src: VReg)

low128(dst) = ieee_nearest(low128(src))
Source§

fn fsqrt64(&mut self, dst: FReg, src: FReg)

dst = ieee_sqrt(src)
Source§

fn fneg64(&mut self, dst: FReg, src: FReg)

dst = -src
Source§

fn fabs64(&mut self, dst: FReg, src: FReg)

dst = |src|
Source§

fn vconst128(&mut self, dst: VReg, imm: u128)

dst = imm
Source§

fn vaddi8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src1 + src2
Source§

fn vaddi16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src1 + src2
Source§

fn vaddi32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src1 + src2
Source§

fn vaddi64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src1 + src2
Source§

fn vaddf32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src1 + src2
Source§

fn vaddf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src1 + src2
Source§

fn vaddi8x16_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_add(src1, src2)
Source§

fn vaddu8x16_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_add(src1, src2)
Source§

fn vaddi16x8_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_add(src1, src2)
Source§

fn vaddu16x8_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_add(src1, src2)
Source§

fn vaddpairwisei16x8_s(&mut self, operands: BinaryOperands<VReg>)

dst = [src1[0] + src1[1], ..., src2[6] + src2[7]]
Source§

fn vaddpairwisei32x4_s(&mut self, operands: BinaryOperands<VReg>)

dst = [src1[0] + src1[1], ..., src2[2] + src2[3]]
Source§

fn vshli8x16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 << src2
Source§

fn vshli16x8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 << src2
Source§

fn vshli32x4(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 << src2
Source§

fn vshli64x2(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 << src2
Source§

fn vshri8x16_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (signed)
Source§

fn vshri16x8_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (signed)
Source§

fn vshri32x4_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (signed)
Source§

fn vshri64x2_s(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (signed)
Source§

fn vshri8x16_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (unsigned)
Source§

fn vshri16x8_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (unsigned)
Source§

fn vshri32x4_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (unsigned)
Source§

fn vshri64x2_u(&mut self, operands: BinaryOperands<VReg, VReg, XReg>)

dst = src1 >> src2 (unsigned)
Source§

fn vsplatx8(&mut self, dst: VReg, src: XReg)

dst = splat(low8(src))
Source§

fn vsplatx16(&mut self, dst: VReg, src: XReg)

dst = splat(low16(src))
Source§

fn vsplatx32(&mut self, dst: VReg, src: XReg)

dst = splat(low32(src))
Source§

fn vsplatx64(&mut self, dst: VReg, src: XReg)

dst = splat(src)
Source§

fn vsplatf32(&mut self, dst: VReg, src: FReg)

dst = splat(low32(src))
Source§

fn vsplatf64(&mut self, dst: VReg, src: FReg)

dst = splat(src)
Source§

fn vload8x8_s_z(&mut self, dst: VReg, addr: AddrZ)

Load the 64-bit source as i8x8 and sign-extend to i16x8.
Source§

fn vload8x8_u_z(&mut self, dst: VReg, addr: AddrZ)

Load the 64-bit source as u8x8 and zero-extend to i16x8.
Source§

fn vload16x4le_s_z(&mut self, dst: VReg, addr: AddrZ)

Load the 64-bit source as i16x4 and sign-extend to i32x4.
Source§

fn vload16x4le_u_z(&mut self, dst: VReg, addr: AddrZ)

Load the 64-bit source as u16x4 and zero-extend to i32x4.
Source§

fn vload32x2le_s_z(&mut self, dst: VReg, addr: AddrZ)

Load the 64-bit source as i32x2 and sign-extend to i64x2.
Source§

fn vload32x2le_u_z(&mut self, dst: VReg, addr: AddrZ)

Load the 64-bit source as u32x2 and zero-extend to i64x2.
Source§

fn vband128(&mut self, operands: BinaryOperands<VReg>)

dst = src1 & src2
Source§

fn vbor128(&mut self, operands: BinaryOperands<VReg>)

dst = src1 | src2
Source§

fn vbxor128(&mut self, operands: BinaryOperands<VReg>)

dst = src1 ^ src2
Source§

fn vbnot128(&mut self, dst: VReg, src: VReg)

dst = !src
Source§

fn vbitselect128(&mut self, dst: VReg, c: VReg, x: VReg, y: VReg)

dst = (c & x) | (!c & y)
Source§

fn vbitmask8x16(&mut self, dst: XReg, src: VReg)

Collect high bits of each lane into the low 32-bits of the destination.
Source§

fn vbitmask16x8(&mut self, dst: XReg, src: VReg)

Collect high bits of each lane into the low 32-bits of the destination.
Source§

fn vbitmask32x4(&mut self, dst: XReg, src: VReg)

Collect high bits of each lane into the low 32-bits of the destination.
Source§

fn vbitmask64x2(&mut self, dst: XReg, src: VReg)

Collect high bits of each lane into the low 32-bits of the destination.
Source§

fn valltrue8x16(&mut self, dst: XReg, src: VReg)

Store whether all lanes are nonzero in dst.
Source§

fn valltrue16x8(&mut self, dst: XReg, src: VReg)

Store whether all lanes are nonzero in dst.
Source§

fn valltrue32x4(&mut self, dst: XReg, src: VReg)

Store whether all lanes are nonzero in dst.
Source§

fn valltrue64x2(&mut self, dst: XReg, src: VReg)

Store whether all lanes are nonzero in dst.
Source§

fn vanytrue8x16(&mut self, dst: XReg, src: VReg)

Store whether any lanes are nonzero in dst.
Source§

fn vanytrue16x8(&mut self, dst: XReg, src: VReg)

Store whether any lanes are nonzero in dst.
Source§

fn vanytrue32x4(&mut self, dst: XReg, src: VReg)

Store whether any lanes are nonzero in dst.
Source§

fn vanytrue64x2(&mut self, dst: XReg, src: VReg)

Store whether any lanes are nonzero in dst.
Source§

fn vf32x4_from_i32x4_s(&mut self, dst: VReg, src: VReg)

Int-to-float conversion (same as f32_from_x32_s)
Source§

fn vf32x4_from_i32x4_u(&mut self, dst: VReg, src: VReg)

Int-to-float conversion (same as f32_from_x32_u)
Source§

fn vf64x2_from_i64x2_s(&mut self, dst: VReg, src: VReg)

Int-to-float conversion (same as f64_from_x64_s)
Source§

fn vf64x2_from_i64x2_u(&mut self, dst: VReg, src: VReg)

Int-to-float conversion (same as f64_from_x64_u)
Source§

fn vi32x4_from_f32x4_s(&mut self, dst: VReg, src: VReg)

Float-to-int conversion (same as x32_from_f32_s)
Source§

fn vi32x4_from_f32x4_u(&mut self, dst: VReg, src: VReg)

Float-to-int conversion (same as x32_from_f32_u)
Source§

fn vi64x2_from_f64x2_s(&mut self, dst: VReg, src: VReg)

Float-to-int conversion (same as x64_from_f64_s)
Source§

fn vi64x2_from_f64x2_u(&mut self, dst: VReg, src: VReg)

Float-to-int conversion (same as x64_from_f64_u)
Source§

fn vwidenlow8x16_s(&mut self, dst: VReg, src: VReg)

Widens the low lanes of the input vector, as signed, to twice the width.
Source§

fn vwidenlow8x16_u(&mut self, dst: VReg, src: VReg)

Widens the low lanes of the input vector, as unsigned, to twice the width.
Source§

fn vwidenlow16x8_s(&mut self, dst: VReg, src: VReg)

Widens the low lanes of the input vector, as signed, to twice the width.
Source§

fn vwidenlow16x8_u(&mut self, dst: VReg, src: VReg)

Widens the low lanes of the input vector, as unsigned, to twice the width.
Source§

fn vwidenlow32x4_s(&mut self, dst: VReg, src: VReg)

Widens the low lanes of the input vector, as signed, to twice the width.
Source§

fn vwidenlow32x4_u(&mut self, dst: VReg, src: VReg)

Widens the low lanes of the input vector, as unsigned, to twice the width.
Source§

fn vwidenhigh8x16_s(&mut self, dst: VReg, src: VReg)

Widens the high lanes of the input vector, as signed, to twice the width.
Source§

fn vwidenhigh8x16_u(&mut self, dst: VReg, src: VReg)

Widens the high lanes of the input vector, as unsigned, to twice the width.
Source§

fn vwidenhigh16x8_s(&mut self, dst: VReg, src: VReg)

Widens the high lanes of the input vector, as signed, to twice the width.
Source§

fn vwidenhigh16x8_u(&mut self, dst: VReg, src: VReg)

Widens the high lanes of the input vector, as unsigned, to twice the width.
Source§

fn vwidenhigh32x4_s(&mut self, dst: VReg, src: VReg)

Widens the high lanes of the input vector, as signed, to twice the width.
Source§

fn vwidenhigh32x4_u(&mut self, dst: VReg, src: VReg)

Widens the high lanes of the input vector, as unsigned, to twice the width.
Source§

fn vnarrow16x8_s(&mut self, operands: BinaryOperands<VReg>)

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Source§

fn vnarrow16x8_u(&mut self, operands: BinaryOperands<VReg>)

Narrows the two 16x8 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Source§

fn vnarrow32x4_s(&mut self, operands: BinaryOperands<VReg>)

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Source§

fn vnarrow32x4_u(&mut self, operands: BinaryOperands<VReg>)

Narrows the two 32x4 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Source§

fn vnarrow64x2_s(&mut self, operands: BinaryOperands<VReg>)

Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is signed and saturating.
Source§

fn vnarrow64x2_u(&mut self, operands: BinaryOperands<VReg>)

Narrows the two 64x2 vectors, assuming all input lanes are signed, to half the width. Narrowing is unsigned and saturating.
Source§

fn vunarrow64x2_u(&mut self, operands: BinaryOperands<VReg>)

Narrows the two 64x2 vectors, assuming all input lanes are unsigned, to half the width. Narrowing is unsigned and saturating.
Source§

fn vfpromotelow(&mut self, dst: VReg, src: VReg)

Promotes the low two lanes of the f32x4 input to f64x2.
Source§

fn vfdemote(&mut self, dst: VReg, src: VReg)

Demotes the two f64x2 lanes to f32x2 and then extends with two more zero lanes.
Source§

fn vsubi8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src1 - src2
Source§

fn vsubi16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src1 - src2
Source§

fn vsubi32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src1 - src2
Source§

fn vsubi64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src1 - src2
Source§

fn vsubf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src1 - src2
Source§

fn vsubi8x16_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_sub(src1, src2)
Source§

fn vsubu8x16_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_sub(src1, src2)
Source§

fn vsubi16x8_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_sub(src1, src2)
Source§

fn vsubu16x8_sat(&mut self, operands: BinaryOperands<VReg>)

dst = saturating_sub(src1, src2)
Source§

fn vmuli8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src1 * src2
Source§

fn vmuli16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src1 * src2
Source§

fn vmuli32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src1 * src2
Source§

fn vmuli64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src1 * src2
Source§

fn vmulf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src1 * src2
Source§

fn vqmulrsi16x8(&mut self, operands: BinaryOperands<VReg>)

dst = signed_saturate((src1 * src2 + (1 << (Q - 1))) >> Q)
Source§

fn vpopcnt8x16(&mut self, dst: VReg, src: VReg)

dst = count_ones(src)
Source§

fn xextractv8x16(&mut self, dst: XReg, src: VReg, lane: u8)

low32(dst) = zext(src[lane])
Source§

fn xextractv16x8(&mut self, dst: XReg, src: VReg, lane: u8)

low32(dst) = zext(src[lane])
Source§

fn xextractv32x4(&mut self, dst: XReg, src: VReg, lane: u8)

low32(dst) = src[lane]
Source§

fn xextractv64x2(&mut self, dst: XReg, src: VReg, lane: u8)

dst = src[lane]
Source§

fn fextractv32x4(&mut self, dst: FReg, src: VReg, lane: u8)

low32(dst) = src[lane]
Source§

fn fextractv64x2(&mut self, dst: FReg, src: VReg, lane: u8)

dst = src[lane]
Source§

fn vinsertx8(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)

dst = src1; dst[lane] = src2
Source§

fn vinsertx16(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)

dst = src1; dst[lane] = src2
Source§

fn vinsertx32(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)

dst = src1; dst[lane] = src2
Source§

fn vinsertx64(&mut self, operands: BinaryOperands<VReg, VReg, XReg>, lane: u8)

dst = src1; dst[lane] = src2
Source§

fn vinsertf32(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8)

dst = src1; dst[lane] = src2
Source§

fn vinsertf64(&mut self, operands: BinaryOperands<VReg, VReg, FReg>, lane: u8)

dst = src1; dst[lane] = src2
Source§

fn veq8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src == dst
Source§

fn vneq8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src != dst
Source§

fn vslt8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (signed)
Source§

fn vslteq8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (signed)
Source§

fn vult8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (unsigned)
Source§

fn vulteq8x16(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (unsigned)
Source§

fn veq16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src == dst
Source§

fn vneq16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src != dst
Source§

fn vslt16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (signed)
Source§

fn vslteq16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (signed)
Source§

fn vult16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (unsigned)
Source§

fn vulteq16x8(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (unsigned)
Source§

fn veq32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src == dst
Source§

fn vneq32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src != dst
Source§

fn vslt32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (signed)
Source§

fn vslteq32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (signed)
Source§

fn vult32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (unsigned)
Source§

fn vulteq32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (unsigned)
Source§

fn veq64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src == dst
Source§

fn vneq64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src != dst
Source§

fn vslt64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (signed)
Source§

fn vslteq64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (signed)
Source§

fn vult64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst (unsigned)
Source§

fn vulteq64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst (unsigned)
Source§

fn vneg8x16(&mut self, dst: VReg, src: VReg)

dst = -src
Source§

fn vneg16x8(&mut self, dst: VReg, src: VReg)

dst = -src
Source§

fn vneg32x4(&mut self, dst: VReg, src: VReg)

dst = -src
Source§

fn vneg64x2(&mut self, dst: VReg, src: VReg)

dst = -src
Source§

fn vnegf64x2(&mut self, dst: VReg, src: VReg)

dst = -src
Source§

fn vmin8x16_s(&mut self, operands: BinaryOperands<VReg>)

dst = min(src1, src2) (signed)
Source§

fn vmin8x16_u(&mut self, operands: BinaryOperands<VReg>)

dst = min(src1, src2) (unsigned)
Source§

fn vmin16x8_s(&mut self, operands: BinaryOperands<VReg>)

dst = min(src1, src2) (signed)
Source§

fn vmin16x8_u(&mut self, operands: BinaryOperands<VReg>)

dst = min(src1, src2) (unsigned)
Source§

fn vmax8x16_s(&mut self, operands: BinaryOperands<VReg>)

dst = max(src1, src2) (signed)
Source§

fn vmax8x16_u(&mut self, operands: BinaryOperands<VReg>)

dst = max(src1, src2) (unsigned)
Source§

fn vmax16x8_s(&mut self, operands: BinaryOperands<VReg>)

dst = max(src1, src2) (signed)
Source§

fn vmax16x8_u(&mut self, operands: BinaryOperands<VReg>)

dst = max(src1, src2) (unsigned)
Source§

fn vmin32x4_s(&mut self, operands: BinaryOperands<VReg>)

dst = min(src1, src2) (signed)
Source§

fn vmin32x4_u(&mut self, operands: BinaryOperands<VReg>)

dst = min(src1, src2) (unsigned)
Source§

fn vmax32x4_s(&mut self, operands: BinaryOperands<VReg>)

dst = max(src1, src2) (signed)
Source§

fn vmax32x4_u(&mut self, operands: BinaryOperands<VReg>)

dst = max(src1, src2) (unsigned)
Source§

fn vabs8x16(&mut self, dst: VReg, src: VReg)

dst = |src|
Source§

fn vabs16x8(&mut self, dst: VReg, src: VReg)

dst = |src|
Source§

fn vabs32x4(&mut self, dst: VReg, src: VReg)

dst = |src|
Source§

fn vabs64x2(&mut self, dst: VReg, src: VReg)

dst = |src|
Source§

fn vabsf32x4(&mut self, dst: VReg, src: VReg)

dst = |src|
Source§

fn vabsf64x2(&mut self, dst: VReg, src: VReg)

dst = |src|
Source§

fn vmaximumf32x4(&mut self, operands: BinaryOperands<VReg>)

dst = ieee_maximum(src1, src2)
Source§

fn vmaximumf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = ieee_maximum(src1, src2)
Source§

fn vminimumf32x4(&mut self, operands: BinaryOperands<VReg>)

dst = ieee_minimum(src1, src2)
Source§

fn vminimumf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = ieee_minimum(src1, src2)
Source§

fn vshuffle(&mut self, dst: VReg, src1: VReg, src2: VReg, mask: u128)

dst = shuffle(src1, src2, mask)
Source§

fn vswizzlei8x16(&mut self, operands: BinaryOperands<VReg>)

dst = swizzle(src1, src2)
Source§

fn vavground8x16(&mut self, operands: BinaryOperands<VReg>)

dst = (src1 + src2 + 1) // 2
Source§

fn vavground16x8(&mut self, operands: BinaryOperands<VReg>)

dst = (src1 + src2 + 1) // 2
Source§

fn veqf32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src == dst
Source§

fn vneqf32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src != dst
Source§

fn vltf32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst
Source§

fn vlteqf32x4(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst
Source§

fn veqf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src == dst
Source§

fn vneqf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src != dst
Source§

fn vltf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src < dst
Source§

fn vlteqf64x2(&mut self, operands: BinaryOperands<VReg>)

dst = src <= dst
Source§

fn vfma32x4(&mut self, dst: VReg, a: VReg, b: VReg, c: VReg)

dst = ieee_fma(a, b, c)
Source§

fn vfma64x2(&mut self, dst: VReg, a: VReg, b: VReg, c: VReg)

dst = ieee_fma(a, b, c)
Source§

fn vselect(&mut self, dst: VReg, cond: XReg, if_nonzero: VReg, if_zero: VReg)

dst = low32(cond) ? if_nonzero : if_zero
Source§

fn xadd128( &mut self, dst_lo: XReg, dst_hi: XReg, lhs_lo: XReg, lhs_hi: XReg, rhs_lo: XReg, rhs_hi: XReg, )

dst_hi:dst_lo = lhs_hi:lhs_lo + rhs_hi:rhs_lo
Source§

fn xsub128( &mut self, dst_lo: XReg, dst_hi: XReg, lhs_lo: XReg, lhs_hi: XReg, rhs_lo: XReg, rhs_hi: XReg, )

dst_hi:dst_lo = lhs_hi:lhs_lo - rhs_hi:rhs_lo
Source§

fn xwidemul64_s(&mut self, dst_lo: XReg, dst_hi: XReg, lhs: XReg, rhs: XReg)

dst_hi:dst_lo = sext(lhs) * sext(rhs)
Source§

fn xwidemul64_u(&mut self, dst_lo: XReg, dst_hi: XReg, lhs: XReg, rhs: XReg)

dst_hi:dst_lo = zext(lhs) * zext(rhs)
Source§

impl<'a> OpVisitor for Disassembler<'a>

Source§

type BytecodeStream = SafeBytecodeStream<'a>

The type of this visitor’s bytecode stream.
Source§

type Return = ()

The type of values returned by each visitor method.
Source§

fn bytecode(&mut self) -> &mut Self::BytecodeStream

Get this visitor’s underlying bytecode stream.
Source§

fn before_visit(&mut self)

A callback invoked before starting to decode an instruction. Read more
Source§

fn after_visit(&mut self)

A callback invoked after an instruction has been completely decoded. Read more
Source§

fn ret(&mut self)

Transfer control to the address in the lr register.
Source§

fn call(&mut self, offset: PcRelOffset)

Transfer control to the PC at the given offset and set the lr register to the PC just after this instruction. Read more
Source§

fn call1(&mut self, arg1: XReg, offset: PcRelOffset)

Like call, but also x0 = arg1
Source§

fn call2(&mut self, arg1: XReg, arg2: XReg, offset: PcRelOffset)

Like call, but also x0, x1 = arg1, arg2
Source§

fn call3(&mut self, arg1: XReg, arg2: XReg, arg3: XReg, offset: PcRelOffset)

Like call, but also x0, x1, x2 = arg1, arg2, arg3
Source§

fn call4( &mut self, arg1: XReg, arg2: XReg, arg3: XReg, arg4: XReg, offset: PcRelOffset, )

Like call, but also x0, x1, x2, x3 = arg1, arg2, arg3, arg4
Source§

fn call_indirect(&mut self, reg: XReg)

Transfer control to the PC in reg and set lr to the PC just after this instruction.
Source§

fn jump(&mut self, offset: PcRelOffset)

Unconditionally transfer control to the PC at the given offset.
Source§

fn xjump(&mut self, reg: XReg)

Unconditionally transfer control to the PC in the specified register.
Source§

fn br_if32(&mut self, cond: XReg, offset: PcRelOffset)

Conditionally transfer control to the given PC offset if low32(cond) contains a non-zero value.
Source§

fn br_if_not32(&mut self, cond: XReg, offset: PcRelOffset)

Conditionally transfer control to the given PC offset if low32(cond) contains a zero value.
Source§

fn br_if_xeq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if a == b.
Source§

fn br_if_xneq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if a != b.
Source§

fn br_if_xslt32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if signed a < b.
Source§

fn br_if_xslteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if signed a <= b.
Source§

fn br_if_xult32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if unsigned a < b.
Source§

fn br_if_xulteq32(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if unsigned a <= b.
Source§

fn br_if_xeq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if a == b.
Source§

fn br_if_xneq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if a != b.
Source§

fn br_if_xslt64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if signed a < b.
Source§

fn br_if_xslteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if signed a <= b.
Source§

fn br_if_xult64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if unsigned a < b.
Source§

fn br_if_xulteq64(&mut self, a: XReg, b: XReg, offset: PcRelOffset)

Branch if unsigned a <= b.
Source§

fn br_if_xeq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if a == b.
Source§

fn br_if_xeq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if a == b.
Source§

fn br_if_xneq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if a != b.
Source§

fn br_if_xneq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if a != b.
Source§

fn br_if_xslt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a < b.
Source§

fn br_if_xslt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a < b.
Source§

fn br_if_xsgt32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a > b.
Source§

fn br_if_xsgt32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a > b.
Source§

fn br_if_xslteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a <= b.
Source§

fn br_if_xslteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a <= b.
Source§

fn br_if_xsgteq32_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a >= b.
Source§

fn br_if_xsgteq32_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a >= b.
Source§

fn br_if_xult32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a < b.
Source§

fn br_if_xult32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a < b.
Source§

fn br_if_xulteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a <= b.
Source§

fn br_if_xulteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a <= b.
Source§

fn br_if_xugt32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a > b.
Source§

fn br_if_xugt32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a > b.
Source§

fn br_if_xugteq32_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a >= b.
Source§

fn br_if_xugteq32_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a >= b.
Source§

fn br_if_xeq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if a == b.
Source§

fn br_if_xeq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if a == b.
Source§

fn br_if_xneq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if a != b.
Source§

fn br_if_xneq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if a != b.
Source§

fn br_if_xslt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a < b.
Source§

fn br_if_xslt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a < b.
Source§

fn br_if_xsgt64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a > b.
Source§

fn br_if_xsgt64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a > b.
Source§

fn br_if_xslteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a <= b.
Source§

fn br_if_xslteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a <= b.
Source§

fn br_if_xsgteq64_i8(&mut self, a: XReg, b: i8, offset: PcRelOffset)

Branch if signed a >= b.
Source§

fn br_if_xsgteq64_i32(&mut self, a: XReg, b: i32, offset: PcRelOffset)

Branch if signed a >= b.
Source§

fn br_if_xult64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a < b.
Source§

fn br_if_xult64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a < b.
Source§

fn br_if_xulteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a <= b.
Source§

fn br_if_xulteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a <= b.
Source§

fn br_if_xugt64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a > b.
Source§

fn br_if_xugt64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a > b.
Source§

fn br_if_xugteq64_u8(&mut self, a: XReg, b: u8, offset: PcRelOffset)

Branch if unsigned a >= b.
Source§

fn br_if_xugteq64_u32(&mut self, a: XReg, b: u32, offset: PcRelOffset)

Branch if unsigned a >= b.
Source§

fn br_table32(&mut self, idx: XReg, amt: u32)

Branch to the label indicated by low32(idx). Read more
Source§

fn xmov(&mut self, dst: XReg, src: XReg)

Move between x registers.
Source§

fn xzero(&mut self, dst: XReg)

Set dst = 0
Source§

fn xone(&mut self, dst: XReg)

Set dst = 1
Source§

fn xconst8(&mut self, dst: XReg, imm: i8)

Set dst = sign_extend(imm8).
Source§

fn xconst16(&mut self, dst: XReg, imm: i16)

Set dst = sign_extend(imm16).
Source§

fn xconst32(&mut self, dst: XReg, imm: i32)

Set dst = sign_extend(imm32).
Source§

fn xconst64(&mut self, dst: XReg, imm: i64)

Set dst = imm64.
Source§

fn xadd32(&mut self, operands: BinaryOperands<XReg>)

32-bit wrapping addition: low32(dst) = low32(src1) + low32(src2). Read more
Source§

fn xadd32_u8(&mut self, dst: XReg, src1: XReg, src2: u8)

Same as xadd32 but src2 is a zero-extended 8-bit immediate.
Source§

fn xadd32_u32(&mut self, dst: XReg, src1: XReg, src2: u32)

Same as xadd32 but src2 is a 32-bit immediate.
Source§

fn xadd64(&mut self, operands: BinaryOperands<XReg>)

64-bit wrapping addition: dst = src1 + src2.
Source§

fn xadd64_u8(&mut self, dst: XReg, src1: XReg, src2: u8)

Same as xadd64 but src2 is a zero-extended 8-bit immediate.
Source§

fn xadd64_u32(&mut self, dst: XReg, src1: XReg, src2: u32)

Same as xadd64 but src2 is a zero-extended 32-bit immediate.
Source§

fn xmadd32(&mut self, dst: XReg, src1: XReg, src2: XReg, src3: XReg)

low32(dst) = low32(src1) * low32(src2) + low32(src3)
Source§

fn xmadd64(&mut self, dst: XReg, src1: XReg, src2: XReg, src3: XReg)

dst = src1 * src2 + src3
Source§

fn xsub32(&mut self, operands: BinaryOperands<XReg>)

32-bit wrapping subtraction: low32(dst) = low32(src1) - low32(src2). Read more
Source§

fn xsub32_u8(&mut self, dst: XReg, src1: XReg, src2: u8)

Same as xsub32 but src2 is a zero-extended 8-bit immediate.
Source§

fn xsub32_u32(&mut self, dst: XReg, src1: XReg, src2: u32)

Same as xsub32 but src2 is a 32-bit immediate.
Source§

fn xsub64(&mut self, operands: BinaryOperands<XReg>)

64-bit wrapping subtraction: dst = src1 - src2.
Source§

fn xsub64_u8(&mut self, dst: XReg, src1: XReg, src2: u8)

Same as xsub64 but src2 is a zero-extended 8-bit immediate.
Source§

fn xsub64_u32(&mut self, dst: XReg, src1: XReg, src2: u32)

Same as xsub64 but src2 is a zero-extended 32-bit immediate.
Source§

fn xmul32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) * low32(src2)
Source§

fn xmul32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xmul32 but src2 is a sign-extended 8-bit immediate.
Source§

fn xmul32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xmul32 but src2 is a sign-extended 32-bit immediate.
Source§

fn xmul64(&mut self, operands: BinaryOperands<XReg>)

dst = src1 * src2
Source§

fn xmul64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xmul64 but src2 is a sign-extended 8-bit immediate.
Source§

fn xmul64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xmul64 but src2 is a sign-extended 32-bit immediate.
Source§

fn xctz32(&mut self, dst: XReg, src: XReg)

low32(dst) = trailing_zeros(low32(src))
Source§

fn xctz64(&mut self, dst: XReg, src: XReg)

dst = trailing_zeros(src)
Source§

fn xclz32(&mut self, dst: XReg, src: XReg)

low32(dst) = leading_zeros(low32(src))
Source§

fn xclz64(&mut self, dst: XReg, src: XReg)

dst = leading_zeros(src)
Source§

fn xpopcnt32(&mut self, dst: XReg, src: XReg)

low32(dst) = count_ones(low32(src))
Source§

fn xpopcnt64(&mut self, dst: XReg, src: XReg)

dst = count_ones(src)
Source§

fn xrotl32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = rotate_left(low32(src1), low32(src2))
Source§

fn xrotl64(&mut self, operands: BinaryOperands<XReg>)

dst = rotate_left(src1, src2)
Source§

fn xrotr32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = rotate_right(low32(src1), low32(src2))
Source§

fn xrotr64(&mut self, operands: BinaryOperands<XReg>)

dst = rotate_right(src1, src2)
Source§

fn xshl32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) << low5(src2)
Source§

fn xshr32_s(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) >> low5(src2)
Source§

fn xshr32_u(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) >> low5(src2)
Source§

fn xshl64(&mut self, operands: BinaryOperands<XReg>)

dst = src1 << low6(src2)
Source§

fn xshr64_s(&mut self, operands: BinaryOperands<XReg>)

dst = src1 >> low6(src2)
Source§

fn xshr64_u(&mut self, operands: BinaryOperands<XReg>)

dst = src1 >> low6(src2)
Source§

fn xshl32_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)

low32(dst) = low32(src1) << low5(src2)
Source§

fn xshr32_s_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)

low32(dst) = low32(src1) >> low5(src2)
Source§

fn xshr32_u_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)

low32(dst) = low32(src1) >> low5(src2)
Source§

fn xshl64_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)

dst = src1 << low6(src2)
Source§

fn xshr64_s_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)

dst = src1 >> low6(src2)
Source§

fn xshr64_u_u6(&mut self, operands: BinaryOperands<XReg, XReg, U6>)

dst = src1 >> low6(src2)
Source§

fn xneg32(&mut self, dst: XReg, src: XReg)

low32(dst) = -low32(src)
Source§

fn xneg64(&mut self, dst: XReg, src: XReg)

dst = -src
Source§

fn xeq64(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = src1 == src2
Source§

fn xneq64(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = src1 != src2
Source§

fn xslt64(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = src1 < src2 (signed)
Source§

fn xslteq64(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = src1 <= src2 (signed)
Source§

fn xult64(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = src1 < src2 (unsigned)
Source§

fn xulteq64(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = src1 <= src2 (unsigned)
Source§

fn xeq32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) == low32(src2)
Source§

fn xneq32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) != low32(src2)
Source§

fn xslt32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) < low32(src2) (signed)
Source§

fn xslteq32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) <= low32(src2) (signed)
Source§

fn xult32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) < low32(src2) (unsigned)
Source§

fn xulteq32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) <= low32(src2) (unsigned)
Source§

fn xload8_u32_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = zext_8_32(*addr)
Source§

fn xload8_s32_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = sext_8_32(*addr)
Source§

fn xload16le_u32_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = zext_16_32(*addr)
Source§

fn xload16le_s32_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = sext_16_32(*addr)
Source§

fn xload32le_o32(&mut self, dst: XReg, addr: AddrO32)

low32(dst) = *addr
Source§

fn xload64le_o32(&mut self, dst: XReg, addr: AddrO32)

dst = *addr
Source§

fn xstore8_o32(&mut self, addr: AddrO32, src: XReg)

*addr = low8(src)
Source§

fn xstore16le_o32(&mut self, addr: AddrO32, src: XReg)

*addr = low16(src)
Source§

fn xstore32le_o32(&mut self, addr: AddrO32, src: XReg)

*addr = low32(src)
Source§

fn xstore64le_o32(&mut self, addr: AddrO32, src: XReg)

*addr = src
Source§

fn xload8_u32_z(&mut self, dst: XReg, addr: AddrZ)

low32(dst) = zext_8_32(*addr)
Source§

fn xload8_s32_z(&mut self, dst: XReg, addr: AddrZ)

low32(dst) = sext_8_32(*addr)
Source§

fn xload16le_u32_z(&mut self, dst: XReg, addr: AddrZ)

low32(dst) = zext_16_32(*addr)
Source§

fn xload16le_s32_z(&mut self, dst: XReg, addr: AddrZ)

low32(dst) = sext_16_32(*addr)
Source§

fn xload32le_z(&mut self, dst: XReg, addr: AddrZ)

low32(dst) = *addr
Source§

fn xload64le_z(&mut self, dst: XReg, addr: AddrZ)

dst = *addr
Source§

fn xstore8_z(&mut self, addr: AddrZ, src: XReg)

*addr = low8(src)
Source§

fn xstore16le_z(&mut self, addr: AddrZ, src: XReg)

*addr = low16(src)
Source§

fn xstore32le_z(&mut self, addr: AddrZ, src: XReg)

*addr = low32(src)
Source§

fn xstore64le_z(&mut self, addr: AddrZ, src: XReg)

*addr = src
Source§

fn xload8_u32_g32(&mut self, dst: XReg, addr: AddrG32)

low32(dst) = zext_8_32(*addr)
Source§

fn xload8_s32_g32(&mut self, dst: XReg, addr: AddrG32)

low32(dst) = sext_8_32(*addr)
Source§

fn xload16le_u32_g32(&mut self, dst: XReg, addr: AddrG32)

low32(dst) = zext_16_32(*addr)
Source§

fn xload16le_s32_g32(&mut self, dst: XReg, addr: AddrG32)

low32(dst) = sext_16_32(*addr)
Source§

fn xload32le_g32(&mut self, dst: XReg, addr: AddrG32)

low32(dst) = *addr
Source§

fn xload64le_g32(&mut self, dst: XReg, addr: AddrG32)

dst = *addr
Source§

fn xstore8_g32(&mut self, addr: AddrG32, src: XReg)

*addr = low8(src)
Source§

fn xstore16le_g32(&mut self, addr: AddrG32, src: XReg)

*addr = low16(src)
Source§

fn xstore32le_g32(&mut self, addr: AddrG32, src: XReg)

*addr = low32(src)
Source§

fn xstore64le_g32(&mut self, addr: AddrG32, src: XReg)

*addr = src
Source§

fn xload8_u32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne)

low32(dst) = zext_8_32(*addr)
Source§

fn xload8_s32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne)

low32(dst) = sext_8_32(*addr)
Source§

fn xload16le_u32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne)

low32(dst) = zext_16_32(*addr)
Source§

fn xload16le_s32_g32bne(&mut self, dst: XReg, addr: AddrG32Bne)

low32(dst) = sext_16_32(*addr)
Source§

fn xload32le_g32bne(&mut self, dst: XReg, addr: AddrG32Bne)

low32(dst) = *addr
Source§

fn xload64le_g32bne(&mut self, dst: XReg, addr: AddrG32Bne)

dst = *addr
Source§

fn xstore8_g32bne(&mut self, addr: AddrG32Bne, src: XReg)

*addr = low8(src)
Source§

fn xstore16le_g32bne(&mut self, addr: AddrG32Bne, src: XReg)

*addr = low16(src)
Source§

fn xstore32le_g32bne(&mut self, addr: AddrG32Bne, src: XReg)

*addr = low32(src)
Source§

fn xstore64le_g32bne(&mut self, addr: AddrG32Bne, src: XReg)

*addr = src
Source§

fn push_frame(&mut self)

push lr; push fp; fp = sp
Source§

fn pop_frame(&mut self)

sp = fp; pop fp; pop lr
Source§

fn push_frame_save(&mut self, amt: u16, regs: UpperRegSet<XReg>)

Macro-instruction to enter a function, allocate some stack, and then save some registers. Read more
Source§

fn pop_frame_restore(&mut self, amt: u16, regs: UpperRegSet<XReg>)

Inverse of push_frame_save. Restores regs from the top of the stack, then runs stack_free32 amt, then runs pop_frame.
Source§

fn stack_alloc32(&mut self, amt: u32)

sp = sp.checked_sub(amt)
Source§

fn stack_free32(&mut self, amt: u32)

sp = sp + amt
Source§

fn zext8(&mut self, dst: XReg, src: XReg)

dst = zext(low8(src))
Source§

fn zext16(&mut self, dst: XReg, src: XReg)

dst = zext(low16(src))
Source§

fn zext32(&mut self, dst: XReg, src: XReg)

dst = zext(low32(src))
Source§

fn sext8(&mut self, dst: XReg, src: XReg)

dst = sext(low8(src))
Source§

fn sext16(&mut self, dst: XReg, src: XReg)

dst = sext(low16(src))
Source§

fn sext32(&mut self, dst: XReg, src: XReg)

dst = sext(low32(src))
Source§

fn xabs32(&mut self, dst: XReg, src: XReg)

low32(dst) = |low32(src)|
Source§

fn xabs64(&mut self, dst: XReg, src: XReg)

dst = |src|
Source§

fn xdiv32_s(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) / low32(src2) (signed)
Source§

fn xdiv64_s(&mut self, operands: BinaryOperands<XReg>)

dst = src1 / src2 (signed)
Source§

fn xdiv32_u(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) / low32(src2) (unsigned)
Source§

fn xdiv64_u(&mut self, operands: BinaryOperands<XReg>)

dst = src1 / src2 (unsigned)
Source§

fn xrem32_s(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) % low32(src2) (signed)
Source§

fn xrem64_s(&mut self, operands: BinaryOperands<XReg>)

dst = src1 % src2 (signed)
Source§

fn xrem32_u(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) % low32(src2) (unsigned)
Source§

fn xrem64_u(&mut self, operands: BinaryOperands<XReg>)

dst = src1 % src2 (unsigned)
Source§

fn xband32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) & low32(src2)
Source§

fn xband32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xband32 but src2 is a sign-extended 8-bit immediate.
Source§

fn xband32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xband32 but src2 is a sign-extended 32-bit immediate.
Source§

fn xband64(&mut self, operands: BinaryOperands<XReg>)

dst = src1 & src2
Source§

fn xband64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xband64 but src2 is a sign-extended 8-bit immediate.
Source§

fn xband64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xband64 but src2 is a sign-extended 32-bit immediate.
Source§

fn xbor32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) | low32(src2)
Source§

fn xbor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xbor32 but src2 is a sign-extended 8-bit immediate.
Source§

fn xbor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xbor32 but src2 is a sign-extended 32-bit immediate.
Source§

fn xbor64(&mut self, operands: BinaryOperands<XReg>)

dst = src1 | src2
Source§

fn xbor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xbor64 but src2 is a sign-extended 8-bit immediate.
Source§

fn xbor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xbor64 but src2 is a sign-extended 32-bit immediate.
Source§

fn xbxor32(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = low32(src1) ^ low32(src2)
Source§

fn xbxor32_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xbxor32 but src2 is a sign-extended 8-bit immediate.
Source§

fn xbxor32_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xbxor32 but src2 is a sign-extended 32-bit immediate.
Source§

fn xbxor64(&mut self, operands: BinaryOperands<XReg>)

dst = src1 ^ src2
Source§

fn xbxor64_s8(&mut self, dst: XReg, src1: XReg, src2: i8)

Same as xbxor64 but src2 is a sign-extended 8-bit immediate.
Source§

fn xbxor64_s32(&mut self, dst: XReg, src1: XReg, src2: i32)

Same as xbxor64 but src2 is a sign-extended 32-bit immediate.
Source§

fn xbnot32(&mut self, dst: XReg, src: XReg)

low32(dst) = !low32(src)
Source§

fn xbnot64(&mut self, dst: XReg, src: XReg)

dst = !src
Source§

fn xmin32_u(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = min(low32(src1), low32(src2)) (unsigned)
Source§

fn xmin32_s(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = min(low32(src1), low32(src2)) (signed)
Source§

fn xmax32_u(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = max(low32(src1), low32(src2)) (unsigned)
Source§

fn xmax32_s(&mut self, operands: BinaryOperands<XReg>)

low32(dst) = max(low32(src1), low32(src2)) (signed)
Source§

fn xmin64_u(&mut self, operands: BinaryOperands<XReg>)

dst = min(src1, src2) (unsigned)
Source§

fn xmin64_s(&mut self, operands: BinaryOperands<XReg>)

dst = min(src1, src2) (signed)
Source§

fn xmax64_u(&mut self, operands: BinaryOperands<XReg>)

dst = max(src1, src2) (unsigned)
Source§

fn xmax64_s(&mut self, operands: BinaryOperands<XReg>)

dst = max(src1, src2) (signed)
Source§

fn xselect32(&mut self, dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg)

low32(dst) = low32(cond) ? low32(if_nonzero) : low32(if_zero)
Source§

fn xselect64(&mut self, dst: XReg, cond: XReg, if_nonzero: XReg, if_zero: XReg)

dst = low32(cond) ? if_nonzero : if_zero

Auto Trait Implementations§


impl<'a> Freeze for Disassembler<'a>


impl<'a> RefUnwindSafe for Disassembler<'a>


impl<'a> Send for Disassembler<'a>


impl<'a> Sync for Disassembler<'a>


impl<'a> Unpin for Disassembler<'a>


impl<'a> UnwindSafe for Disassembler<'a>

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.