From 87bec89b104b38a1ceb0768152197ca4abb9b29b Mon Sep 17 00:00:00 2001 From: kobalicek Date: Thu, 14 Sep 2023 16:47:52 +0200 Subject: [PATCH] [API] Deprecated the use of Operand::size() From this moment, the use of Operand::size() is discouraged: - Use BaseReg::size() to get a size of a register - Use x86::Mem::size() to get a size of a x86 memory operand - Use Operand::x86RmSize() to get a size of a register or x86 memory operand (convenience) Size occupying bits of each operand is problematic as we want to pack more information into architecture specific operands. For example AArch32 and AArch64 memory operands need more payload, which can use the bits that were used by size in the past. --- src/asmjit/core/operand.h | 52 +++-- src/asmjit/x86/x86assembler.cpp | 344 +++++++++++++++---------------- src/asmjit/x86/x86emithelper.cpp | 4 +- src/asmjit/x86/x86formatter.cpp | 2 +- src/asmjit/x86/x86instapi.cpp | 58 +++--- src/asmjit/x86/x86operand.h | 18 ++ 6 files changed, 257 insertions(+), 221 deletions(-) diff --git a/src/asmjit/core/operand.h b/src/asmjit/core/operand.h index f8e0bc7..6ffde24 100644 --- a/src/asmjit/core/operand.h +++ b/src/asmjit/core/operand.h @@ -627,23 +627,6 @@ struct Operand_ { //! Tests whether the operand is a virtual register. ASMJIT_INLINE_NODEBUG constexpr bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; } - //! Tests whether the operand specifies a size (i.e. the size is not zero). - ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField(); } - //! Tests whether the size of the operand matches `size`. - ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; } - - //! Returns the size of the operand in bytes. - //! - //! The value returned depends on the operand type: - //! * None - Should always return zero size. - //! * Reg - Should always return the size of the register. If the register size depends on architecture - //! 
(like `x86::CReg` and `x86::DReg`) the size returned should be the greatest possible (so it - should return 64-bit size in such case). - * Mem - Size is optional and will be in most cases zero. - * Imm - Should always return zero size. - * Label - Should always return zero size. - ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField(); } - //! Returns the operand id. //! //! The value returned should be interpreted accordingly to the operand type: @@ -682,6 +665,32 @@ struct Operand_ { } //! \} + + //! \name Accessors (X86 Specific) + //! \{ + + //! Returns a size of a register or an X86 memory operand. + //! + //! At the moment only X86 and X86_64 memory operands have a size - other memory operands can use bits that represent + //! size as an additional payload. This means that memory size is architecture specific and should be accessed via + //! \ref x86::Mem::size(). Sometimes when the user knows that the operand is either a register or memory operand this + //! function can be helpful as it avoids casting. + ASMJIT_INLINE_NODEBUG constexpr uint32_t x86RmSize() const noexcept { + return _signature.size(); + } + +#if !defined(ASMJIT_NO_DEPRECATED) + ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64") + ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return x86RmSize() != 0u; } + + ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64") + ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return x86RmSize() == s; } + + ASMJIT_DEPRECATED("size() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64") + ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField(); } +#endif + + //! \} }; //! Base class representing an operand in AsmJit (default constructed version). @@ -936,6 +945,15 @@ public: //! 
Returns the register group. ASMJIT_INLINE_NODEBUG constexpr RegGroup group() const noexcept { return _signature.regGroup(); } + //! Tests whether the register specifies a size (i.e. the size is not zero). + ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField(); } + //! Tests whether the register size matches size `s`. + ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; } + + //! Returns the size of the register in bytes. If the register size depends on architecture (like `x86::CReg` and + //! `x86::DReg`) the size returned should be the greatest possible (so it should return 64-bit size in such case). + ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField(); } + //! Returns operation predicate of the register (ARM/AArch64). //! //! The meaning depends on architecture, for example on ARM hardware this describes \ref arm::ShiftOp diff --git a/src/asmjit/x86/x86assembler.cpp b/src/asmjit/x86/x86assembler.cpp index 6eabd2e..1dd0096 100644 --- a/src/asmjit/x86/x86assembler.cpp +++ b/src/asmjit/x86/x86assembler.cpp @@ -740,11 +740,11 @@ ASMJIT_FAVOR_SPEED Error Assembler::_emit(InstId instId, const Operand_& o0, con case InstDB::kEncodingX86M_NoMemSize: if (o0.isReg()) - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); goto CaseX86M_NoSize; case InstDB::kEncodingX86M: - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingX86M_NoSize: @@ -783,20 +783,20 @@ CaseX86M_GPB_MulDiv: // [?DX:?AX] <- [?DX:?AX] div|mul r16|r32|r64 if (isign3 == ENC_OPS3(Reg, Reg, Reg)) { - if (ASMJIT_UNLIKELY(o0.size() != o1.size())) + if (ASMJIT_UNLIKELY(o0.x86RmSize() != o1.x86RmSize())) goto InvalidInstruction; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rbReg = o2.id(); goto EmitX86R; } // [?DX:?AX] <- [?DX:?AX] div|mul m16|m32|m64 if (isign3 == 
ENC_OPS3(Reg, Reg, Mem)) { - if (ASMJIT_UNLIKELY(o0.size() != o1.size())) + if (ASMJIT_UNLIKELY(o0.x86RmSize() != o1.x86RmSize())) goto InvalidInstruction; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rmRel = &o2; goto EmitX86M; } @@ -808,10 +808,10 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86M_GPB: if (isign3 == ENC_OPS1(Reg)) { - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rbReg = o0.id(); - if (o0.size() != 1) + if (o0.x86RmSize() != 1) goto EmitX86R; FIXUP_GPB(o0, rbReg); @@ -819,10 +819,10 @@ CaseX86M_GPB_MulDiv: } if (isign3 == ENC_OPS1(Mem)) { - if (ASMJIT_UNLIKELY(o0.size() == 0)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() == 0)) goto AmbiguousOperandSize; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rmRel = &o0; goto EmitX86M; } @@ -851,20 +851,20 @@ CaseX86M_GPB_MulDiv: opReg = 0; if (isign3 == ENC_OPS1(Reg)) { - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); rbReg = o0.id(); goto EmitX86R; } if (isign3 == ENC_OPS1(Mem)) { - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); rmRel = &o0; goto EmitX86M; } // Two operand NOP instruction "0F 1F /r". 
opReg = o1.id(); - opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); if (isign3 == ENC_OPS2(Reg, Reg)) { rbReg = o0.id(); @@ -911,7 +911,7 @@ CaseX86M_GPB_MulDiv: break; case InstDB::kEncodingX86Rm: - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingX86Rm_NoSize: @@ -934,10 +934,10 @@ CaseX86M_GPB_MulDiv: opReg = o0.id(); rbReg = o1.id(); - if (o0.size() == 2) + if (o0.x86RmSize() == 2) writer.emit8(0x66); else - opcode.addWBySize(o0.size()); + opcode.addWBySize(o0.x86RmSize()); goto EmitX86R; } @@ -945,16 +945,16 @@ CaseX86M_GPB_MulDiv: opReg = o0.id(); rmRel = &o1; - if (o0.size() == 2) + if (o0.x86RmSize() == 2) writer.emit8(0x66); else - opcode.addWBySize(o0.size()); + opcode.addWBySize(o0.x86RmSize()); goto EmitX86M; } break; case InstDB::kEncodingX86Mr: - opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingX86Mr_NoSize: @@ -973,15 +973,15 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86Arith: if (isign3 == ENC_OPS2(Reg, Reg)) { - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); - if (o0.size() != o1.size()) + if (o0.x86RmSize() != o1.x86RmSize()) goto OperandSizeMismatch; rbReg = o0.id(); opReg = o1.id(); - if (o0.size() == 1) { + if (o0.x86RmSize() == 1) { FIXUP_GPB(o0, rbReg); FIXUP_GPB(o1, opReg); } @@ -998,12 +998,12 @@ CaseX86M_GPB_MulDiv: if (isign3 == ENC_OPS2(Reg, Mem)) { opcode += 2; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); opReg = o0.id(); rmRel = &o1; - if (o0.size() != 1) + if (o0.x86RmSize() != 1) goto EmitX86M; FIXUP_GPB(o0, opReg); @@ -1011,11 +1011,11 @@ CaseX86M_GPB_MulDiv: } if (isign3 == ENC_OPS2(Mem, Reg)) { - opcode.addArithBySize(o1.size()); + opcode.addArithBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; - if (o1.size() != 1) + if (o1.x86RmSize() != 1) goto EmitX86M; FIXUP_GPB(o1, opReg); @@ -1026,7 +1026,7 
@@ CaseX86M_GPB_MulDiv: opcode = 0x80; if (isign3 == ENC_OPS2(Reg, Imm)) { - uint32_t size = o0.size(); + uint32_t size = o0.x86RmSize(); rbReg = o0.id(); immValue = o1.as().value(); @@ -1080,7 +1080,7 @@ CaseX86M_GPB_MulDiv: } if (isign3 == ENC_OPS2(Mem, Imm)) { - uint32_t memSize = o0.size(); + uint32_t memSize = o0.x86RmSize(); if (ASMJIT_UNLIKELY(memSize == 0)) goto AmbiguousOperandSize; @@ -1105,25 +1105,25 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86Bswap: if (isign3 == ENC_OPS1(Reg)) { - if (ASMJIT_UNLIKELY(o0.size() == 1)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() == 1)) goto InvalidInstruction; opReg = o0.id(); - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); goto EmitX86OpReg; } break; case InstDB::kEncodingX86Bt: if (isign3 == ENC_OPS2(Reg, Reg)) { - opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); opReg = o1.id(); rbReg = o0.id(); goto EmitX86R; } if (isign3 == ENC_OPS2(Mem, Reg)) { - opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; goto EmitX86M; @@ -1134,7 +1134,7 @@ CaseX86M_GPB_MulDiv: immSize = 1; opcode = x86AltOpcodeOf(instInfo); - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); opReg = opcode.extractModO(); if (isign3 == ENC_OPS2(Reg, Imm)) { @@ -1143,7 +1143,7 @@ CaseX86M_GPB_MulDiv: } if (isign3 == ENC_OPS2(Mem, Imm)) { - if (ASMJIT_UNLIKELY(o0.size() == 0)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() == 0)) goto AmbiguousOperandSize; rmRel = &o0; @@ -1176,14 +1176,14 @@ CaseX86M_GPB_MulDiv: } if (isign3 == ENC_OPS2(Reg, Reg)) { - if (o0.size() != o1.size()) + if (o0.x86RmSize() != o1.x86RmSize()) goto OperandSizeMismatch; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rbReg = o0.id(); opReg = o1.id(); - if (o0.size() != 1) + if (o0.x86RmSize() != 1) goto EmitX86R; FIXUP_GPB(o0, rbReg); @@ -1192,11 +1192,11 @@ CaseX86M_GPB_MulDiv: } if (isign3 == ENC_OPS2(Mem, Reg)) { - 
opcode.addArithBySize(o1.size()); + opcode.addArithBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; - if (o1.size() != 1) + if (o1.x86RmSize() != 1) goto EmitX86M; FIXUP_GPB(o1, opReg); @@ -1225,18 +1225,18 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86Crc: opReg = o0.id(); - opcode.addWBySize(o0.size()); + opcode.addWBySize(o0.x86RmSize()); if (isign3 == ENC_OPS2(Reg, Reg)) { rbReg = o1.id(); - if (o1.size() == 1) { + if (o1.x86RmSize() == 1) { FIXUP_GPB(o1, rbReg); goto EmitX86R; } else { // This seems to be the only exception of encoding '66F2' prefix. - if (o1.size() == 2) writer.emit8(0x66); + if (o1.x86RmSize() == 2) writer.emit8(0x66); opcode.add(1); goto EmitX86R; @@ -1245,13 +1245,13 @@ CaseX86M_GPB_MulDiv: if (isign3 == ENC_OPS2(Reg, Mem)) { rmRel = &o1; - if (o1.size() == 0) + if (o1.x86RmSize() == 0) goto AmbiguousOperandSize; // This seems to be the only exception of encoding '66F2' prefix. - if (o1.size() == 2) writer.emit8(0x66); + if (o1.x86RmSize() == 2) writer.emit8(0x66); - opcode += o1.size() != 1; + opcode += o1.x86RmSize() != 1; goto EmitX86M; } break; @@ -1271,14 +1271,14 @@ CaseX86M_GPB_MulDiv: // First process all forms distinct of `kEncodingX86M_OptB_MulDiv`. if (isign3 == ENC_OPS3(Reg, Reg, Imm)) { opcode = 0x6B; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); immValue = o2.as().value(); immSize = 1; if (!Support::isInt8(immValue) || Support::test(options, InstOptions::kLongForm)) { opcode -= 2; - immSize = o0.size() == 2 ? 2 : 4; + immSize = o0.x86RmSize() == 2 ? 2 : 4; } opReg = o0.id(); @@ -1289,18 +1289,18 @@ CaseX86M_GPB_MulDiv: if (isign3 == ENC_OPS3(Reg, Mem, Imm)) { opcode = 0x6B; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); immValue = o2.as().value(); immSize = 1; // Sign extend so isInt8 returns the right result. 
- if (o0.size() == 4) + if (o0.x86RmSize() == 4) immValue = x86SignExtendI32(immValue); if (!Support::isInt8(immValue) || Support::test(options, InstOptions::kLongForm)) { opcode -= 2; - immSize = o0.size() == 2 ? 2 : 4; + immSize = o0.x86RmSize() == 2 ? 2 : 4; } opReg = o0.id(); @@ -1311,48 +1311,48 @@ CaseX86M_GPB_MulDiv: if (isign3 == ENC_OPS2(Reg, Reg)) { // Must be explicit 'ax, r8' form. - if (o1.size() == 1) + if (o1.x86RmSize() == 1) goto CaseX86M_GPB_MulDiv; - if (o0.size() != o1.size()) + if (o0.x86RmSize() != o1.x86RmSize()) goto OperandSizeMismatch; opReg = o0.id(); rbReg = o1.id(); opcode = Opcode::k000F00 | 0xAF; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); goto EmitX86R; } if (isign3 == ENC_OPS2(Reg, Mem)) { // Must be explicit 'ax, m8' form. - if (o1.size() == 1) + if (o1.x86RmSize() == 1) goto CaseX86M_GPB_MulDiv; opReg = o0.id(); rmRel = &o1; opcode = Opcode::k000F00 | 0xAF; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); goto EmitX86M; } // Shorthand to imul 'reg, reg, imm'. if (isign3 == ENC_OPS2(Reg, Imm)) { opcode = 0x6B; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); immValue = o1.as().value(); immSize = 1; // Sign extend so isInt8 returns the right result. - if (o0.size() == 4) + if (o0.x86RmSize() == 4) immValue = x86SignExtendI32(immValue); if (!Support::isInt8(immValue) || Support::test(options, InstOptions::kLongForm)) { opcode -= 2; - immSize = o0.size() == 2 ? 2 : 4; + immSize = o0.x86RmSize() == 2 ? 
2 : 4; } opReg = rbReg = o0.id(); @@ -1370,8 +1370,8 @@ CaseX86M_GPB_MulDiv: immValue = o1.as().valueAs(); immSize = 1; - opcode = x86AltOpcodeOf(instInfo) + (o0.size() != 1); - opcode.add66hBySize(o0.size()); + opcode = x86AltOpcodeOf(instInfo) + (o0.x86RmSize() != 1); + opcode.add66hBySize(o0.x86RmSize()); goto EmitX86Op; } @@ -1379,8 +1379,8 @@ CaseX86M_GPB_MulDiv: if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdAx || o1.id() != Gp::kIdDx)) goto InvalidInstruction; - opcode += o0.size() != 1; - opcode.add66hBySize(o0.size()); + opcode += o0.x86RmSize() != 1; + opcode.add66hBySize(o0.x86RmSize()); goto EmitX86Op; } break; @@ -1390,7 +1390,7 @@ CaseX86M_GPB_MulDiv: if (ASMJIT_UNLIKELY(!x86IsImplicitMem(o0, Gp::kIdDi) || o1.id() != Gp::kIdDx)) goto InvalidInstruction; - uint32_t size = o0.size(); + uint32_t size = o0.x86RmSize(); if (ASMJIT_UNLIKELY(size == 0)) goto AmbiguousOperandSize; @@ -1406,7 +1406,7 @@ CaseX86M_GPB_MulDiv: if (isign3 == ENC_OPS1(Reg)) { rbReg = o0.id(); - if (o0.size() == 1) { + if (o0.x86RmSize() == 1) { FIXUP_GPB(o0, rbReg); goto EmitX86R; } @@ -1414,19 +1414,19 @@ CaseX86M_GPB_MulDiv: if (is32Bit()) { // INC r16|r32 is only encodable in 32-bit mode (collides with REX). 
opcode = x86AltOpcodeOf(instInfo) + (rbReg & 0x07); - opcode.add66hBySize(o0.size()); + opcode.add66hBySize(o0.x86RmSize()); goto EmitX86Op; } else { - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); goto EmitX86R; } } if (isign3 == ENC_OPS1(Mem)) { - if (!o0.size()) + if (!o0.x86RmSize()) goto AmbiguousOperandSize; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rmRel = &o0; goto EmitX86M; } @@ -1457,7 +1457,7 @@ CaseX86M_GPB_MulDiv: if (ASMJIT_UNLIKELY(!Reg::isGp(o0, Gp::kIdCx))) goto InvalidInstruction; - writer.emitAddressOverride((is32Bit() && o0.size() == 2) || (is64Bit() && o0.size() == 4)); + writer.emitAddressOverride((is32Bit() && o0.x86RmSize() == 2) || (is64Bit() && o0.x86RmSize() == 4)); rmRel = &o1; } @@ -1487,7 +1487,7 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86LcallLjmp: if (isign3 == ENC_OPS1(Mem)) { rmRel = &o0; - uint32_t mSize = rmRel->size(); + uint32_t mSize = rmRel->as().size(); if (mSize == 0) { mSize = registerSize(); } @@ -1519,7 +1519,7 @@ CaseX86M_GPB_MulDiv: case InstDB::kEncodingX86Lea: if (isign3 == ENC_OPS2(Reg, Mem)) { - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); opReg = o0.id(); rmRel = &o1; goto EmitX86M; @@ -1540,8 +1540,8 @@ CaseX86M_GPB_MulDiv: // GP <- GP if (Reg::isGp(o1)) { - uint32_t opSize = o0.size(); - if (opSize != o1.size()) + uint32_t opSize = o0.x86RmSize(); + if (opSize != o1.x86RmSize()) goto InvalidInstruction; if (opSize == 1) { @@ -1572,7 +1572,7 @@ CaseX86M_GPB_MulDiv: // GP <- SReg if (Reg::isSReg(o1)) { opcode = 0x8C; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); opReg--; goto EmitX86R; } @@ -1606,7 +1606,7 @@ CaseX86M_GPB_MulDiv: // SReg <- GP if (Reg::isSReg(o0)) { opcode = 0x8E; - opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); opReg--; goto EmitX86R; } @@ -1640,25 +1640,25 @@ CaseX86M_GPB_MulDiv: // SReg <- Mem if (Reg::isSReg(o0)) { opcode = 0x8E; - 
opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); opReg--; goto EmitX86M; } // Reg <- Mem else { opcode = 0; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); // Handle a special form of `mov al|ax|eax|rax, [ptr64]` that doesn't use MOD. if (opReg == Gp::kIdAx && !rmRel->as().hasBaseOrIndex()) { - if (x86ShouldUseMovabs(this, writer, o0.size(), options, rmRel->as())) { + if (x86ShouldUseMovabs(this, writer, o0.x86RmSize(), options, rmRel->as())) { opcode += 0xA0; immValue = rmRel->as().offset(); goto EmitX86OpMovAbs; } } - if (o0.size() == 1) + if (o0.x86RmSize() == 1) FIXUP_GPB(o0, opReg); opcode += 0x8A; @@ -1673,25 +1673,25 @@ CaseX86M_GPB_MulDiv: // Mem <- SReg if (Reg::isSReg(o1)) { opcode = 0x8C; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); opReg--; goto EmitX86M; } // Mem <- Reg else { opcode = 0; - opcode.addArithBySize(o1.size()); + opcode.addArithBySize(o1.x86RmSize()); // Handle a special form of `mov [ptr64], al|ax|eax|rax` that doesn't use MOD. 
if (opReg == Gp::kIdAx && !rmRel->as().hasBaseOrIndex()) { - if (x86ShouldUseMovabs(this, writer, o1.size(), options, rmRel->as())) { + if (x86ShouldUseMovabs(this, writer, o1.x86RmSize(), options, rmRel->as())) { opcode += 0xA2; immValue = rmRel->as().offset(); goto EmitX86OpMovAbs; } } - if (o1.size() == 1) + if (o1.x86RmSize() == 1) FIXUP_GPB(o1, opReg); opcode += 0x88; @@ -1701,7 +1701,7 @@ CaseX86M_GPB_MulDiv: if (isign3 == ENC_OPS2(Reg, Imm)) { opReg = o0.id(); - immSize = FastUInt8(o0.size()); + immSize = FastUInt8(o0.x86RmSize()); if (immSize == 1) { FIXUP_GPB(o0, opReg); @@ -1739,7 +1739,7 @@ CaseX86M_GPB_MulDiv: } if (isign3 == ENC_OPS2(Mem, Imm)) { - uint32_t memSize = o0.size(); + uint32_t memSize = o0.x86RmSize(); if (ASMJIT_UNLIKELY(memSize == 0)) goto AmbiguousOperandSize; @@ -1761,7 +1761,7 @@ CaseX86M_GPB_MulDiv: rmRel = &o1; opcode = 0xA0; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); if (ASMJIT_UNLIKELY(!o0.as().isGp()) || opReg != Gp::kIdAx) goto InvalidInstruction; @@ -1782,7 +1782,7 @@ CaseX86M_GPB_MulDiv: rmRel = &o0; opcode = 0xA2; - opcode.addArithBySize(o1.size()); + opcode.addArithBySize(o1.x86RmSize()); if (ASMJIT_UNLIKELY(!o1.as().isGp()) || opReg != Gp::kIdAx) goto InvalidInstruction; @@ -1811,14 +1811,14 @@ CaseX86M_GPB_MulDiv: break; case InstDB::kEncodingX86MovsxMovzx: - opcode.add(o1.size() != 1); - opcode.addPrefixBySize(o0.size()); + opcode.add(o1.x86RmSize() != 1); + opcode.addPrefixBySize(o0.x86RmSize()); if (isign3 == ENC_OPS2(Reg, Reg)) { opReg = o0.id(); rbReg = o1.id(); - if (o1.size() != 1) + if (o1.x86RmSize() != 1) goto EmitX86R; FIXUP_GPB(o1, rbReg); @@ -1864,8 +1864,8 @@ CaseX86M_GPB_MulDiv: if (ASMJIT_UNLIKELY(o1.id() != Gp::kIdAx)) goto InvalidInstruction; - opcode = x86AltOpcodeOf(instInfo) + (o1.size() != 1); - opcode.add66hBySize(o1.size()); + opcode = x86AltOpcodeOf(instInfo) + (o1.x86RmSize() != 1); + opcode.add66hBySize(o1.x86RmSize()); immValue = o0.as().valueAs(); immSize = 1; 
@@ -1876,8 +1876,8 @@ CaseX86M_GPB_MulDiv: if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdDx || o1.id() != Gp::kIdAx)) goto InvalidInstruction; - opcode.add(o1.size() != 1); - opcode.add66hBySize(o1.size()); + opcode.add(o1.x86RmSize() != 1); + opcode.add66hBySize(o1.x86RmSize()); goto EmitX86Op; } break; @@ -1887,7 +1887,7 @@ CaseX86M_GPB_MulDiv: if (ASMJIT_UNLIKELY(o0.id() != Gp::kIdDx || !x86IsImplicitMem(o1, Gp::kIdSi))) goto InvalidInstruction; - uint32_t size = o1.size(); + uint32_t size = o1.x86RmSize(); if (ASMJIT_UNLIKELY(size == 0)) goto AmbiguousOperandSize; @@ -1939,24 +1939,24 @@ CaseX86M_GPB_MulDiv: CaseX86PushPop_Gp: // We allow 2 byte, 4 byte, and 8 byte register sizes, although PUSH and POP only allow 2 bytes or // native size. On 64-bit we simply PUSH/POP 64-bit register even if 32-bit register was given. - if (ASMJIT_UNLIKELY(o0.size() < 2)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() < 2)) goto InvalidInstruction; opcode = x86AltOpcodeOf(instInfo); - opcode.add66hBySize(o0.size()); + opcode.add66hBySize(o0.x86RmSize()); opReg = o0.id(); goto EmitX86OpReg; } } if (isign3 == ENC_OPS1(Mem)) { - if (ASMJIT_UNLIKELY(o0.size() == 0)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() == 0)) goto AmbiguousOperandSize; - if (ASMJIT_UNLIKELY(o0.size() != 2 && o0.size() != registerSize())) + if (ASMJIT_UNLIKELY(o0.x86RmSize() != 2 && o0.x86RmSize() != registerSize())) goto InvalidInstruction; - opcode.add66hBySize(o0.size()); + opcode.add66hBySize(o0.x86RmSize()); rmRel = &o0; goto EmitX86M; } @@ -1985,10 +1985,10 @@ CaseX86PushPop_Gp: case InstDB::kEncodingX86Rot: if (o0.isReg()) { - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rbReg = o0.id(); - if (o0.size() == 1) + if (o0.x86RmSize() == 1) FIXUP_GPB(o0, rbReg); if (isign3 == ENC_OPS2(Reg, Reg)) { @@ -2012,9 +2012,9 @@ CaseX86PushPop_Gp: } } else { - if (ASMJIT_UNLIKELY(o0.size() == 0)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() == 0)) goto AmbiguousOperandSize; - opcode.addArithBySize(o0.size()); + 
opcode.addArithBySize(o0.x86RmSize()); if (isign3 == ENC_OPS2(Mem, Reg)) { if (ASMJIT_UNLIKELY(o1.id() != Gp::kIdCx)) @@ -2055,7 +2055,7 @@ CaseX86PushPop_Gp: case InstDB::kEncodingX86ShldShrd: if (isign3 == ENC_OPS3(Reg, Reg, Imm)) { - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); opReg = o1.id(); rbReg = o0.id(); @@ -2065,7 +2065,7 @@ CaseX86PushPop_Gp: } if (isign3 == ENC_OPS3(Mem, Reg, Imm)) { - opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; @@ -2081,7 +2081,7 @@ CaseX86PushPop_Gp: if (ASMJIT_UNLIKELY(o2.id() != Gp::kIdCx)) goto InvalidInstruction; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); opReg = o1.id(); rbReg = o0.id(); goto EmitX86R; @@ -2091,7 +2091,7 @@ CaseX86PushPop_Gp: if (ASMJIT_UNLIKELY(o2.id() != Gp::kIdCx)) goto InvalidInstruction; - opcode.addPrefixBySize(o1.size()); + opcode.addPrefixBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; goto EmitX86M; @@ -2104,8 +2104,8 @@ CaseX86PushPop_Gp: if (ASMJIT_UNLIKELY(rmRel->as().offsetLo32() || !Reg::isGp(o0.as(), Gp::kIdAx))) goto InvalidInstruction; - uint32_t size = o0.size(); - if (o1.hasSize() && ASMJIT_UNLIKELY(o1.size() != size)) + uint32_t size = o0.x86RmSize(); + if (o1.x86RmSize() != 0u && ASMJIT_UNLIKELY(o1.x86RmSize() != size)) goto OperandSizeMismatch; opcode.addArithBySize(size); @@ -2119,8 +2119,8 @@ CaseX86PushPop_Gp: if (ASMJIT_UNLIKELY(rmRel->as().offsetLo32() || !Reg::isGp(o1.as(), Gp::kIdAx))) goto InvalidInstruction; - uint32_t size = o1.size(); - if (o0.hasSize() && ASMJIT_UNLIKELY(o0.size() != size)) + uint32_t size = o1.x86RmSize(); + if (o0.x86RmSize() != 0u && ASMJIT_UNLIKELY(o0.x86RmSize() != size)) goto OperandSizeMismatch; opcode.addArithBySize(size); @@ -2138,11 +2138,11 @@ CaseX86PushPop_Gp: if (ASMJIT_UNLIKELY(o0.as().hasOffset())) goto InvalidInstruction; - uint32_t size = o1.size(); + uint32_t size = o1.x86RmSize(); if (ASMJIT_UNLIKELY(size == 
0)) goto AmbiguousOperandSize; - if (ASMJIT_UNLIKELY(o0.size() != size)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() != size)) goto OperandSizeMismatch; opcode.addArithBySize(size); @@ -2152,14 +2152,14 @@ CaseX86PushPop_Gp: case InstDB::kEncodingX86Test: if (isign3 == ENC_OPS2(Reg, Reg)) { - if (o0.size() != o1.size()) + if (o0.x86RmSize() != o1.x86RmSize()) goto OperandSizeMismatch; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rbReg = o0.id(); opReg = o1.id(); - if (o0.size() != 1) + if (o0.x86RmSize() != 1) goto EmitX86R; FIXUP_GPB(o0, rbReg); @@ -2168,11 +2168,11 @@ CaseX86PushPop_Gp: } if (isign3 == ENC_OPS2(Mem, Reg)) { - opcode.addArithBySize(o1.size()); + opcode.addArithBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; - if (o1.size() != 1) + if (o1.x86RmSize() != 1) goto EmitX86M; FIXUP_GPB(o1, opReg); @@ -2184,23 +2184,23 @@ CaseX86PushPop_Gp: opReg = opcode.extractModO(); if (isign3 == ENC_OPS2(Reg, Imm)) { - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rbReg = o0.id(); - if (o0.size() == 1) { + if (o0.x86RmSize() == 1) { FIXUP_GPB(o0, rbReg); immValue = o1.as().valueAs(); immSize = 1; } else { immValue = o1.as().value(); - immSize = FastUInt8(Support::min(o0.size(), 4)); + immSize = FastUInt8(Support::min(o0.x86RmSize(), 4)); } // Short form - AL, AX, EAX, RAX. 
if (rbReg == 0 && !Support::test(options, InstOptions::kLongForm)) { opcode &= Opcode::kPP_66 | Opcode::kW; - opcode |= 0xA8 + (o0.size() != 1); + opcode |= 0xA8 + (o0.x86RmSize() != 1); goto EmitX86Op; } @@ -2208,25 +2208,25 @@ CaseX86PushPop_Gp: } if (isign3 == ENC_OPS2(Mem, Imm)) { - if (ASMJIT_UNLIKELY(o0.size() == 0)) + if (ASMJIT_UNLIKELY(o0.x86RmSize() == 0)) goto AmbiguousOperandSize; - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); rmRel = &o0; immValue = o1.as().value(); - immSize = FastUInt8(Support::min(o0.size(), 4)); + immSize = FastUInt8(Support::min(o0.x86RmSize(), 4)); goto EmitX86M; } break; case InstDB::kEncodingX86Xchg: if (isign3 == ENC_OPS2(Reg, Mem)) { - opcode.addArithBySize(o0.size()); + opcode.addArithBySize(o0.x86RmSize()); opReg = o0.id(); rmRel = &o1; - if (o0.size() != 1) + if (o0.x86RmSize() != 1) goto EmitX86M; FIXUP_GPB(o0, opReg); @@ -2239,8 +2239,8 @@ CaseX86PushPop_Gp: rbReg = o0.id(); opReg = o1.id(); - uint32_t opSize = o0.size(); - if (opSize != o1.size()) + uint32_t opSize = o0.x86RmSize(); + if (opSize != o1.x86RmSize()) goto OperandSizeMismatch; if (opSize == 1) { @@ -2278,11 +2278,11 @@ CaseX86PushPop_Gp: } if (isign3 == ENC_OPS2(Mem, Reg)) { - opcode.addArithBySize(o1.size()); + opcode.addArithBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; - if (o1.size() == 1) { + if (o1.x86RmSize() == 1) { FIXUP_GPB(o1, opReg); } @@ -2356,7 +2356,7 @@ CaseFpuArith_Reg: if (isign3 == ENC_OPS1(Mem)) { CaseFpuArith_Mem: // 0xD8/0xDC, depends on the size of the memory operand; opReg is valid. - opcode = (o0.size() == 4) ? 0xD8 : 0xDC; + opcode = (o0.x86RmSize() == 4) ? 0xD8 : 0xDC; // Clear compressed displacement before going to EmitX86M. 
opcode &= ~uint32_t(Opcode::kCDSHL_Mask); @@ -2385,16 +2385,16 @@ CaseFpuArith_Mem: if (isign3 == ENC_OPS1(Mem)) { rmRel = &o0; - if (o0.size() == 4 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM32)) { + if (o0.x86RmSize() == 4 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM32)) { goto EmitX86M; } - if (o0.size() == 8 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM64)) { + if (o0.x86RmSize() == 8 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM64)) { opcode += 4; goto EmitX86M; } - if (o0.size() == 10 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM80)) { + if (o0.x86RmSize() == 10 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM80)) { opcode = x86AltOpcodeOf(instInfo); opReg = opcode.extractModO(); goto EmitX86M; @@ -2414,16 +2414,16 @@ CaseFpuArith_Mem: opcode &= ~uint32_t(Opcode::kCDSHL_Mask); rmRel = &o0; - if (o0.size() == 2 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM16)) { + if (o0.x86RmSize() == 2 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM16)) { opcode += 4; goto EmitX86M; } - if (o0.size() == 4 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM32)) { + if (o0.x86RmSize() == 4 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM32)) { goto EmitX86M; } - if (o0.size() == 8 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM64)) { + if (o0.x86RmSize() == 8 && commonInfo->hasFlag(InstDB::InstFlags::kFpuM64)) { opcode = x86AltOpcodeOf(instInfo) & ~uint32_t(Opcode::kCDSHL_Mask); opReg = opcode.extractModO(); goto EmitX86M; @@ -2550,10 +2550,10 @@ CaseFpuArith_Mem: case InstDB::kEncodingExtMovbe: if (isign3 == ENC_OPS2(Reg, Mem)) { - if (o0.size() == 1) + if (o0.x86RmSize() == 1) goto InvalidInstruction; - opcode.addPrefixBySize(o0.size()); + opcode.addPrefixBySize(o0.x86RmSize()); opReg = o0.id(); rmRel = &o1; goto EmitX86M; @@ -2563,10 +2563,10 @@ CaseFpuArith_Mem: opcode = x86AltOpcodeOf(instInfo); if (isign3 == ENC_OPS2(Mem, Reg)) { - if (o1.size() == 1) + if (o1.x86RmSize() == 1) goto InvalidInstruction; - opcode.addPrefixBySize(o1.size()); + 
opcode.addPrefixBySize(o1.x86RmSize()); opReg = o1.id(); rmRel = &o0; goto EmitX86M; @@ -2692,7 +2692,7 @@ CaseExtMovd: goto CaseExtRm; case InstDB::kEncodingExtRm_Wx: - opcode.addWIf(o1.size() == 8); + opcode.addWIf(o1.x86RmSize() == 8); ASMJIT_FALLTHROUGH; case InstDB::kEncodingExtRm_Wx_GpqOnly: @@ -2970,7 +2970,7 @@ CaseExtRm: break; case InstDB::kEncodingVexMr_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); if (isign3 == ENC_OPS2(Reg, Reg)) { opReg = o1.id(); @@ -2987,7 +2987,7 @@ CaseExtRm: case InstDB::kEncodingVexMr_VM: if (isign3 == ENC_OPS2(Mem, Reg)) { - opcode |= Support::max(x86OpcodeLByVMem(o0), x86OpcodeLBySize(o1.size())); + opcode |= Support::max(x86OpcodeLByVMem(o0), x86OpcodeLBySize(o1.x86RmSize())); opReg = o1.id(); rmRel = &o0; @@ -3019,7 +3019,7 @@ CaseExtRm: } case InstDB::kEncodingVexMri_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexMri: @@ -3052,15 +3052,15 @@ CaseVexMri: goto CaseVexRm; case InstDB::kEncodingVexRm_Lx_Narrow: - if (o1.size()) - opcode |= x86OpcodeLBySize(o1.size()); - else if (o0.size() == 32) + if (o1.x86RmSize()) + opcode |= x86OpcodeLBySize(o1.x86RmSize()); + else if (o0.x86RmSize() == 32) opcode |= Opcode::kLL_2; goto CaseVexRm; case InstDB::kEncodingVexRm_Lx_Bcst: if (isign3 == ENC_OPS2(Reg, Reg) && Reg::isGp(o1.as())) { - opcode = x86AltOpcodeOf(instInfo) | x86OpcodeLBySize(o0.size() | o1.size()); + opcode = x86AltOpcodeOf(instInfo) | x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); opReg = o0.id(); rbReg = o1.id(); goto EmitVexEvexR; @@ -3068,7 +3068,7 @@ CaseVexMri: ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRm_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRm: @@ -3088,7 +3088,7 @@ CaseVexRm: case 
InstDB::kEncodingVexRm_VM: if (isign3 == ENC_OPS2(Reg, Mem)) { - opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.size())); + opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.x86RmSize())); opReg = o0.id(); rmRel = &o1; goto EmitVexEvexM; @@ -3122,7 +3122,7 @@ CaseVexRm: goto CaseVexRmi; case InstDB::kEncodingVexRmi_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRmi: @@ -3167,7 +3167,7 @@ CaseVexRvm_R: } case InstDB::kEncodingVexRvm_Wx: { - opcode.addWIf(unsigned(Reg::isGpq(o0)) | unsigned((o2.size() == 8))); + opcode.addWIf(unsigned(Reg::isGpq(o0)) | unsigned((o2.x86RmSize() == 8))); goto CaseVexRvm; } @@ -3177,7 +3177,7 @@ CaseVexRvm_R: } case InstDB::kEncodingVexRvm_Lx: { - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); goto CaseVexRvm; } @@ -3191,7 +3191,7 @@ CaseVexRvm_R: const Operand_& o3 = opExt[EmitterUtils::kOp3]; - opcode |= x86OpcodeLBySize(o2.size()); + opcode |= x86OpcodeLBySize(o2.x86RmSize()); opReg = x86PackRegAndVvvvv(o0.id(), o2.id()); if (o3.isReg()) { @@ -3208,7 +3208,7 @@ CaseVexRvm_R: } case InstDB::kEncodingVexRvmr_Lx: { - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; } @@ -3242,7 +3242,7 @@ CaseVexRvm_R: ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRvmi_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRvmi: @@ -3289,7 +3289,7 @@ VexRvmi: case InstDB::kEncodingVexRmvRm_VM: if (isign3 == ENC_OPS2(Reg, Mem)) { opcode = x86AltOpcodeOf(instInfo); - opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.size())); + opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.x86RmSize())); opReg = o0.id(); rmRel = &o1; 
@@ -3299,7 +3299,7 @@ VexRvmi: case InstDB::kEncodingVexRmv_VM: if (isign3 == ENC_OPS3(Reg, Mem, Reg)) { - opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.size() | o2.size())); + opcode |= Support::max(x86OpcodeLByVMem(o1), x86OpcodeLBySize(o0.x86RmSize() | o2.x86RmSize())); opReg = x86PackRegAndVvvvv(o0.id(), o2.id()); rmRel = &o1; @@ -3333,14 +3333,14 @@ VexRvmi: if (isign3 == ENC_OPS2(Reg, Reg)) { if (Reg::isGp(o0)) { opcode = x86AltOpcodeOf(instInfo); - opcode.addWBySize(o0.size()); + opcode.addWBySize(o0.x86RmSize()); opReg = o1.id(); rbReg = o0.id(); goto EmitVexEvexR; } if (Reg::isGp(o1)) { - opcode.addWBySize(o1.size()); + opcode.addWBySize(o1.x86RmSize()); opReg = o0.id(); rbReg = o1.id(); goto EmitVexEvexR; @@ -3384,7 +3384,7 @@ VexRvmi: break; case InstDB::kEncodingVexRmMr_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRmMr: @@ -3440,7 +3440,7 @@ VexRvmi: break; case InstDB::kEncodingVexRvmRmi_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRvmRmi: @@ -3552,7 +3552,7 @@ VexRvmi: break; case InstDB::kEncodingVexRvmMvr_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRvmMvr: @@ -3584,7 +3584,7 @@ VexRvmi: ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRvmVmi_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRvmVmi: @@ -3645,7 +3645,7 @@ VexRvmi: ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexVmi_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexVmi: @@ -3667,13 +3667,13 @@ 
CaseVexVmi_AfterImm: break; case InstDB::kEncodingVexVmi4_Wx: - opcode.addWIf(Reg::isGpq(o0) || o1.size() == 8); + opcode.addWIf(Reg::isGpq(o0) || o1.x86RmSize() == 8); immValue = o2.as().value(); immSize = 4; goto CaseVexVmi_AfterImm; case InstDB::kEncodingVexRvrmRvmr_Lx: - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingVexRvrmRvmr: { @@ -3718,7 +3718,7 @@ CaseVexVmi_AfterImm: goto InvalidInstruction; const uint32_t isign4 = isign3 + (uint32_t(o3.opType()) << 9); - opcode |= x86OpcodeLBySize(o0.size() | o1.size() | o2.size() | o3.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize() | o2.x86RmSize() | o3.x86RmSize()); immValue = o4.as().valueAs() & 0x0F; immSize = 1; @@ -3774,7 +3774,7 @@ CaseVexVmi_AfterImm: case InstDB::kEncodingFma4_Lx: // It's fine to just check the first operand, second is just for sanity. - opcode |= x86OpcodeLBySize(o0.size() | o1.size()); + opcode |= x86OpcodeLBySize(o0.x86RmSize() | o1.x86RmSize()); ASMJIT_FALLTHROUGH; case InstDB::kEncodingFma4: { diff --git a/src/asmjit/x86/x86emithelper.cpp b/src/asmjit/x86/x86emithelper.cpp index b541c04..ed8eae1 100644 --- a/src/asmjit/x86/x86emithelper.cpp +++ b/src/asmjit/x86/x86emithelper.cpp @@ -70,8 +70,8 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove( // Detect memory operands and patch them to have the same size as the register. BaseCompiler always sets memory size // of allocs and spills, so it shouldn't be really necessary, however, after this function was separated from Compiler // it's better to make sure that the size is always specified, as we can use 'movzx' and 'movsx' that rely on it. 
- if (dst.isMem()) { memFlags |= kDstMem; dst.as().setSize(src.size()); } - if (src.isMem()) { memFlags |= kSrcMem; src.as().setSize(dst.size()); } + if (dst.isMem()) { memFlags |= kDstMem; dst.as().setSize(src.as().size()); } + if (src.isMem()) { memFlags |= kSrcMem; src.as().setSize(dst.as().size()); } switch (typeId) { case TypeId::kInt8: diff --git a/src/asmjit/x86/x86formatter.cpp b/src/asmjit/x86/x86formatter.cpp index a2004c2..5508860 100644 --- a/src/asmjit/x86/x86formatter.cpp +++ b/src/asmjit/x86/x86formatter.cpp @@ -932,7 +932,7 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction( uint32_t vecSize = 16; for (uint32_t j = 0; j < opCount; j++) if (operands[j].isReg()) - vecSize = Support::max(vecSize, operands[j].size()); + vecSize = Support::max(vecSize, operands[j].as().size()); ASMJIT_PROPAGATE(FormatterInternal_explainConst(sb, formatFlags, instId, vecSize, op.as())); } diff --git a/src/asmjit/x86/x86instapi.cpp b/src/asmjit/x86/x86instapi.cpp index 3d8e3ad..60f8414 100644 --- a/src/asmjit/x86/x86instapi.cpp +++ b/src/asmjit/x86/x86instapi.cpp @@ -184,7 +184,7 @@ static const X86ValidationData _x64ValidationData = { #undef REG_MASK_FROM_REG_TYPE_X86 static ASMJIT_FORCE_INLINE bool x86IsZmmOrM512(const Operand_& op) noexcept { - return Reg::isZmm(op) || (op.isMem() && op.size() == 64); + return Reg::isZmm(op) || (op.isMem() && op.x86RmSize() == 64); } static ASMJIT_FORCE_INLINE bool x86CheckOSig(const InstDB::OpSignature& op, const InstDB::OpSignature& ref, bool& immOutOfRange) noexcept { @@ -852,8 +852,8 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* uint64_t rByteMask = rwOpData.rByteMask; uint64_t wByteMask = rwOpData.wByteMask; - if (op.isRead() && !rByteMask) rByteMask = Support::lsbMask(srcOp.size()); - if (op.isWrite() && !wByteMask) wByteMask = Support::lsbMask(srcOp.size()); + if (op.isRead() && !rByteMask) rByteMask = Support::lsbMask(srcOp.x86RmSize()); + if (op.isWrite() && !wByteMask) 
wByteMask = Support::lsbMask(srcOp.x86RmSize()); op._readByteMask = rByteMask; op._writeByteMask = wByteMask; @@ -876,7 +876,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* } // Aggregate values required to calculate valid Reg/M info. - rmMaxSize = Support::max(rmMaxSize, srcOp.size()); + rmMaxSize = Support::max(rmMaxSize, srcOp.x86RmSize()); rmOpsMask |= Support::bitMask(i); } else { @@ -933,7 +933,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* op.setRmSize(instRmInfo.fixedSize); break; case InstDB::RWInfoRm::kCategoryConsistent: - op.setRmSize(operands[i].size()); + op.setRmSize(operands[i].x86RmSize()); break; case InstDB::RWInfoRm::kCategoryHalf: op.setRmSize(rmMaxSize / 2u); @@ -985,8 +985,8 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* const Reg& o1 = operands[1].as(); if (o0.isGp() && o1.isGp()) { - out->_operands[0].reset(W | RegM, operands[0].size()); - out->_operands[1].reset(R | RegM, operands[1].size()); + out->_operands[0].reset(W | RegM, operands[0].x86RmSize()); + out->_operands[1].reset(R | RegM, operands[1].x86RmSize()); rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); out->_instFlags |= InstRWFlags::kMovOp; @@ -1133,14 +1133,14 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* if (opCount == 2) { if (operands[0].isReg() && operands[1].isImm()) { - out->_operands[0].reset(X, operands[0].size()); + out->_operands[0].reset(X, operands[0].as().size()); out->_operands[1].reset(); rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); return kErrorOk; } - if (Reg::isGpw(operands[0]) && operands[1].size() == 1) { + if (Reg::isGpw(operands[0]) && operands[1].x86RmSize() == 1) { // imul ax, r8/m8 <- AX = AL * r8/m8 out->_operands[0].reset(X | RegPhys, 2, Gp::kIdAx); out->_operands[0].setReadByteMask(Support::lsbMask(1)); @@ -1148,8 +1148,8 @@ Error InstInternal::queryRWInfo(Arch 
arch, const BaseInst& inst, const Operand_* } else { // imul r?, r?/m? - out->_operands[0].reset(X, operands[0].size()); - out->_operands[1].reset(R | RegM, operands[0].size()); + out->_operands[0].reset(X, operands[0].as().size()); + out->_operands[1].reset(R | RegM, operands[0].as().size()); rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); } @@ -1160,8 +1160,8 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* if (opCount == 3) { if (operands[2].isImm()) { - out->_operands[0].reset(W, operands[0].size()); - out->_operands[1].reset(R | RegM, operands[1].size()); + out->_operands[0].reset(W, operands[0].x86RmSize()); + out->_operands[1].reset(R | RegM, operands[1].x86RmSize()); out->_operands[2].reset(); rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); @@ -1170,9 +1170,9 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* return kErrorOk; } else { - out->_operands[0].reset(W | RegPhys, operands[0].size(), Gp::kIdDx); - out->_operands[1].reset(X | RegPhys, operands[1].size(), Gp::kIdAx); - out->_operands[2].reset(R | RegM, operands[2].size()); + out->_operands[0].reset(W | RegPhys, operands[0].x86RmSize(), Gp::kIdDx); + out->_operands[1].reset(X | RegPhys, operands[1].x86RmSize(), Gp::kIdAx); + out->_operands[2].reset(R | RegM, operands[2].x86RmSize()); rwZeroExtendGp(out->_operands[0], operands[0].as(), nativeGpSize); rwZeroExtendGp(out->_operands[1], operands[1].as(), nativeGpSize); @@ -1249,18 +1249,18 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* // Special case for 'vmaskmovpd|vmaskmovps|vpmaskmovd|vpmaskmovq' instructions. 
if (opCount == 3) { if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1]) && operands[2].isMem()) { - out->_operands[0].reset(W, operands[0].size()); - out->_operands[1].reset(R, operands[1].size()); - out->_operands[2].reset(R | MibRead, operands[1].size()); + out->_operands[0].reset(W, operands[0].x86RmSize()); + out->_operands[1].reset(R, operands[1].x86RmSize()); + out->_operands[2].reset(R | MibRead, operands[1].x86RmSize()); rwZeroExtendAvxVec(out->_operands[0], operands[0].as()); return kErrorOk; } if (operands[0].isMem() && BaseReg::isVec(operands[1]) && BaseReg::isVec(operands[2])) { - out->_operands[0].reset(X | MibRead, operands[1].size()); - out->_operands[1].reset(R, operands[1].size()); - out->_operands[2].reset(R, operands[2].size()); + out->_operands[0].reset(X | MibRead, operands[1].x86RmSize()); + out->_operands[1].reset(R, operands[1].x86RmSize()); + out->_operands[2].reset(R, operands[2].x86RmSize()); return kErrorOk; } } @@ -1273,7 +1273,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* // operand, respectively. if (opCount == 2) { if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1])) { - uint32_t o0Size = operands[0].size(); + uint32_t o0Size = operands[0].x86RmSize(); uint32_t o1Size = o0Size == 16 ? 8 : o0Size; out->_operands[0].reset(W, o0Size); @@ -1285,7 +1285,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* } if (BaseReg::isVec(operands[0]) && operands[1].isMem()) { - uint32_t o0Size = operands[0].size(); + uint32_t o0Size = operands[0].x86RmSize(); uint32_t o1Size = o0Size == 16 ? 
8 : o0Size; out->_operands[0].reset(W, o0Size); @@ -1305,7 +1305,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* if (BaseReg::isGp(operands[0]) && BaseReg::isVec(operands[1])) { out->_operands[0].reset(W, 1); out->_operands[0].setExtendByteMask(Support::lsbMask(nativeGpSize - 1) << 1); - out->_operands[1].reset(R, operands[1].size()); + out->_operands[1].reset(R, operands[1].x86RmSize()); return kErrorOk; } } @@ -1343,7 +1343,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* } if (operands[0].isReg() && operands[1].isReg()) { - uint32_t size1 = operands[1].size(); + uint32_t size1 = operands[1].x86RmSize(); uint32_t size0 = size1 >> shift; out->_operands[0].reset(W, size0); @@ -1370,7 +1370,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* } if (operands[0].isReg() && operands[1].isMem()) { - uint32_t size1 = operands[1].size() ? operands[1].size() : uint32_t(16); + uint32_t size1 = operands[1].x86RmSize() ? operands[1].x86RmSize() : uint32_t(16); uint32_t size0 = size1 >> shift; out->_operands[0].reset(W, size0); @@ -1379,7 +1379,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* } if (operands[0].isMem() && operands[1].isReg()) { - uint32_t size1 = operands[1].size(); + uint32_t size1 = operands[1].x86RmSize(); uint32_t size0 = size1 >> shift; out->_operands[0].reset(W | MibRead, size0); @@ -1420,7 +1420,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* out->_operands[2].reset(); } - uint32_t size0 = operands[0].size(); + uint32_t size0 = operands[0].x86RmSize(); uint32_t size1 = size0 >> shift; out->_operands[0].reset(W, size0); diff --git a/src/asmjit/x86/x86operand.h b/src/asmjit/x86/x86operand.h index 90b9256..c3907f5 100644 --- a/src/asmjit/x86/x86operand.h +++ b/src/asmjit/x86/x86operand.h @@ -831,6 +831,24 @@ public: //! \} + //! \name Memory Size + //! \{ + + //! 
Tests whether the memory operand specifies a size (i.e. the size is not zero). + ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField(); } + //! Tests whether the memory operand size matches size `s`. + ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; } + + //! Returns the size of the memory operand in bytes. + //! + //! \note Most instructions would deduce the size of the memory operand, so in most cases it's expected that the + //! returned value would be zero. However, some instructions require the size to select between multiple variations, + //! so in some cases the size is required and would be non-zero (for example `inc [mem], immediate` requires size to + //! distinguish between 8-bit, 16-bit, 32-bit, and 64-bit increments). + ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField(); } + + //! \} + + //! \name Address Type + //! \{