[API] Deprecated the use of Operand::size()

From this moment, the use of Operand::size() is discouraged:

  - Use BaseReg::size() to get a size of a register
  - Use x86::Mem::size() to get a size of an x86 memory operand
  - Use Operand::x86RmSize() to get a size of a register or
    x86 memory operand (convenience)

Size occupying bits of each operand is problematic, as we want
to pack more information into architecture-specific operands.
For example AArch32 and AArch64 memory operands need more payload,
which can use the bits that were used by size in the past.
This commit is contained in:
kobalicek
2023-09-14 16:47:52 +02:00
parent 46bdb67262
commit 87bec89b10
6 changed files with 257 additions and 221 deletions

View File

@@ -627,23 +627,6 @@ struct Operand_ {
//! Tests whether the operand is a virtual register. //! Tests whether the operand is a virtual register.
ASMJIT_INLINE_NODEBUG constexpr bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; } ASMJIT_INLINE_NODEBUG constexpr bool isVirtReg() const noexcept { return isReg() && _baseId > 0xFFu; }
//! Tests whether the operand specifies a size (i.e. the size is not zero).
ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField<Signature::kSizeMask>(); }
//! Tests whether the size of the operand matches `size`.
ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; }
//! Returns the size of the operand in bytes.
//!
//! The value returned depends on the operand type:
//! * None - Should always return zero size.
//! * Reg - Should always return the size of the register. If the register size depends on architecture
//! (like `x86::CReg` and `x86::DReg`) the size returned should be the greatest possible (so it
//! should return 64-bit size in such case).
//! * Mem - Size is optional and will be in most cases zero.
//! * Imm - Should always return zero size.
//! * Label - Should always return zero size.
ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField<Signature::kSizeMask>(); }
//! Returns the operand id. //! Returns the operand id.
//! //!
//! The value returned should be interpreted accordingly to the operand type: //! The value returned should be interpreted accordingly to the operand type:
@@ -682,6 +665,32 @@ struct Operand_ {
} }
//! \} //! \}
//! \name Accessors (X86 Specific)
//! \{
//! Returns a size of a register or an X86 memory operand.
//!
//! At the moment only X86 and X86_64 memory operands have a size - other memory operands can use bits that represent
//! size as an additional payload. This means that memory size is architecture specific and should be accessed via
//! \ref x86::Mem::size(). Sometimes when the user knows that the operand is either a register or memory operand this
//! function can be helpful as it avoids casting.
ASMJIT_INLINE_NODEBUG constexpr uint32_t x86RmSize() const noexcept {
return _signature.size();
}
#if !defined(ASMJIT_NO_DEPRECATED)
ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return x86RmSize() != 0u; }
ASMJIT_DEPRECATED("hasSize() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return x86RmSize() == s; }
ASMJIT_DEPRECATED("size() is no longer portable - use x86RmSize() instead, if your target is X86/X86_64")
ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField<Signature::kSizeMask>(); }
#endif
//! \}
}; };
//! Base class representing an operand in AsmJit (default constructed version). //! Base class representing an operand in AsmJit (default constructed version).
@@ -936,6 +945,15 @@ public:
//! Returns the register group. //! Returns the register group.
ASMJIT_INLINE_NODEBUG constexpr RegGroup group() const noexcept { return _signature.regGroup(); } ASMJIT_INLINE_NODEBUG constexpr RegGroup group() const noexcept { return _signature.regGroup(); }
//! Tests whether the register specifies a size (i.e. the size is not zero).
ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField<Signature::kSizeMask>(); }
//! Tests whether the register size matches size `s`.
ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; }
//! Returns the size of the register in bytes. If the register size depends on architecture (like `x86::CReg` and
//! `x86::DReg`) the size returned should be the greatest possible (so it should return 64-bit size in such case).
ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField<Signature::kSizeMask>(); }
//! Returns operation predicate of the register (ARM/AArch64). //! Returns operation predicate of the register (ARM/AArch64).
//! //!
//! The meaning depends on architecture, for example on ARM hardware this describes \ref arm::ShiftOp //! The meaning depends on architecture, for example on ARM hardware this describes \ref arm::ShiftOp

File diff suppressed because it is too large Load Diff

View File

@@ -70,8 +70,8 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
// Detect memory operands and patch them to have the same size as the register. BaseCompiler always sets memory size // Detect memory operands and patch them to have the same size as the register. BaseCompiler always sets memory size
// of allocs and spills, so it shouldn't be really necessary, however, after this function was separated from Compiler // of allocs and spills, so it shouldn't be really necessary, however, after this function was separated from Compiler
// it's better to make sure that the size is always specified, as we can use 'movzx' and 'movsx' that rely on it. // it's better to make sure that the size is always specified, as we can use 'movzx' and 'movsx' that rely on it.
if (dst.isMem()) { memFlags |= kDstMem; dst.as<Mem>().setSize(src.size()); } if (dst.isMem()) { memFlags |= kDstMem; dst.as<Mem>().setSize(src.as<Mem>().size()); }
if (src.isMem()) { memFlags |= kSrcMem; src.as<Mem>().setSize(dst.size()); } if (src.isMem()) { memFlags |= kSrcMem; src.as<Mem>().setSize(dst.as<Mem>().size()); }
switch (typeId) { switch (typeId) {
case TypeId::kInt8: case TypeId::kInt8:

View File

@@ -932,7 +932,7 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction(
uint32_t vecSize = 16; uint32_t vecSize = 16;
for (uint32_t j = 0; j < opCount; j++) for (uint32_t j = 0; j < opCount; j++)
if (operands[j].isReg()) if (operands[j].isReg())
vecSize = Support::max<uint32_t>(vecSize, operands[j].size()); vecSize = Support::max<uint32_t>(vecSize, operands[j].as<Reg>().size());
ASMJIT_PROPAGATE(FormatterInternal_explainConst(sb, formatFlags, instId, vecSize, op.as<Imm>())); ASMJIT_PROPAGATE(FormatterInternal_explainConst(sb, formatFlags, instId, vecSize, op.as<Imm>()));
} }

View File

@@ -184,7 +184,7 @@ static const X86ValidationData _x64ValidationData = {
#undef REG_MASK_FROM_REG_TYPE_X86 #undef REG_MASK_FROM_REG_TYPE_X86
static ASMJIT_FORCE_INLINE bool x86IsZmmOrM512(const Operand_& op) noexcept { static ASMJIT_FORCE_INLINE bool x86IsZmmOrM512(const Operand_& op) noexcept {
return Reg::isZmm(op) || (op.isMem() && op.size() == 64); return Reg::isZmm(op) || (op.isMem() && op.x86RmSize() == 64);
} }
static ASMJIT_FORCE_INLINE bool x86CheckOSig(const InstDB::OpSignature& op, const InstDB::OpSignature& ref, bool& immOutOfRange) noexcept { static ASMJIT_FORCE_INLINE bool x86CheckOSig(const InstDB::OpSignature& op, const InstDB::OpSignature& ref, bool& immOutOfRange) noexcept {
@@ -852,8 +852,8 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
uint64_t rByteMask = rwOpData.rByteMask; uint64_t rByteMask = rwOpData.rByteMask;
uint64_t wByteMask = rwOpData.wByteMask; uint64_t wByteMask = rwOpData.wByteMask;
if (op.isRead() && !rByteMask) rByteMask = Support::lsbMask<uint64_t>(srcOp.size()); if (op.isRead() && !rByteMask) rByteMask = Support::lsbMask<uint64_t>(srcOp.x86RmSize());
if (op.isWrite() && !wByteMask) wByteMask = Support::lsbMask<uint64_t>(srcOp.size()); if (op.isWrite() && !wByteMask) wByteMask = Support::lsbMask<uint64_t>(srcOp.x86RmSize());
op._readByteMask = rByteMask; op._readByteMask = rByteMask;
op._writeByteMask = wByteMask; op._writeByteMask = wByteMask;
@@ -876,7 +876,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
} }
// Aggregate values required to calculate valid Reg/M info. // Aggregate values required to calculate valid Reg/M info.
rmMaxSize = Support::max(rmMaxSize, srcOp.size()); rmMaxSize = Support::max(rmMaxSize, srcOp.x86RmSize());
rmOpsMask |= Support::bitMask<uint32_t>(i); rmOpsMask |= Support::bitMask<uint32_t>(i);
} }
else { else {
@@ -933,7 +933,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
op.setRmSize(instRmInfo.fixedSize); op.setRmSize(instRmInfo.fixedSize);
break; break;
case InstDB::RWInfoRm::kCategoryConsistent: case InstDB::RWInfoRm::kCategoryConsistent:
op.setRmSize(operands[i].size()); op.setRmSize(operands[i].x86RmSize());
break; break;
case InstDB::RWInfoRm::kCategoryHalf: case InstDB::RWInfoRm::kCategoryHalf:
op.setRmSize(rmMaxSize / 2u); op.setRmSize(rmMaxSize / 2u);
@@ -985,8 +985,8 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
const Reg& o1 = operands[1].as<Reg>(); const Reg& o1 = operands[1].as<Reg>();
if (o0.isGp() && o1.isGp()) { if (o0.isGp() && o1.isGp()) {
out->_operands[0].reset(W | RegM, operands[0].size()); out->_operands[0].reset(W | RegM, operands[0].x86RmSize());
out->_operands[1].reset(R | RegM, operands[1].size()); out->_operands[1].reset(R | RegM, operands[1].x86RmSize());
rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize); rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize);
out->_instFlags |= InstRWFlags::kMovOp; out->_instFlags |= InstRWFlags::kMovOp;
@@ -1133,14 +1133,14 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
if (opCount == 2) { if (opCount == 2) {
if (operands[0].isReg() && operands[1].isImm()) { if (operands[0].isReg() && operands[1].isImm()) {
out->_operands[0].reset(X, operands[0].size()); out->_operands[0].reset(X, operands[0].as<Reg>().size());
out->_operands[1].reset(); out->_operands[1].reset();
rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize); rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize);
return kErrorOk; return kErrorOk;
} }
if (Reg::isGpw(operands[0]) && operands[1].size() == 1) { if (Reg::isGpw(operands[0]) && operands[1].x86RmSize() == 1) {
// imul ax, r8/m8 <- AX = AL * r8/m8 // imul ax, r8/m8 <- AX = AL * r8/m8
out->_operands[0].reset(X | RegPhys, 2, Gp::kIdAx); out->_operands[0].reset(X | RegPhys, 2, Gp::kIdAx);
out->_operands[0].setReadByteMask(Support::lsbMask<uint64_t>(1)); out->_operands[0].setReadByteMask(Support::lsbMask<uint64_t>(1));
@@ -1148,8 +1148,8 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
} }
else { else {
// imul r?, r?/m? // imul r?, r?/m?
out->_operands[0].reset(X, operands[0].size()); out->_operands[0].reset(X, operands[0].as<Gp>().size());
out->_operands[1].reset(R | RegM, operands[0].size()); out->_operands[1].reset(R | RegM, operands[0].as<Gp>().size());
rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize); rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize);
} }
@@ -1160,8 +1160,8 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
if (opCount == 3) { if (opCount == 3) {
if (operands[2].isImm()) { if (operands[2].isImm()) {
out->_operands[0].reset(W, operands[0].size()); out->_operands[0].reset(W, operands[0].x86RmSize());
out->_operands[1].reset(R | RegM, operands[1].size()); out->_operands[1].reset(R | RegM, operands[1].x86RmSize());
out->_operands[2].reset(); out->_operands[2].reset();
rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize); rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize);
@@ -1170,9 +1170,9 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
return kErrorOk; return kErrorOk;
} }
else { else {
out->_operands[0].reset(W | RegPhys, operands[0].size(), Gp::kIdDx); out->_operands[0].reset(W | RegPhys, operands[0].x86RmSize(), Gp::kIdDx);
out->_operands[1].reset(X | RegPhys, operands[1].size(), Gp::kIdAx); out->_operands[1].reset(X | RegPhys, operands[1].x86RmSize(), Gp::kIdAx);
out->_operands[2].reset(R | RegM, operands[2].size()); out->_operands[2].reset(R | RegM, operands[2].x86RmSize());
rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize); rwZeroExtendGp(out->_operands[0], operands[0].as<Gp>(), nativeGpSize);
rwZeroExtendGp(out->_operands[1], operands[1].as<Gp>(), nativeGpSize); rwZeroExtendGp(out->_operands[1], operands[1].as<Gp>(), nativeGpSize);
@@ -1249,18 +1249,18 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
// Special case for 'vmaskmovpd|vmaskmovps|vpmaskmovd|vpmaskmovq' instructions. // Special case for 'vmaskmovpd|vmaskmovps|vpmaskmovd|vpmaskmovq' instructions.
if (opCount == 3) { if (opCount == 3) {
if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1]) && operands[2].isMem()) { if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1]) && operands[2].isMem()) {
out->_operands[0].reset(W, operands[0].size()); out->_operands[0].reset(W, operands[0].x86RmSize());
out->_operands[1].reset(R, operands[1].size()); out->_operands[1].reset(R, operands[1].x86RmSize());
out->_operands[2].reset(R | MibRead, operands[1].size()); out->_operands[2].reset(R | MibRead, operands[1].x86RmSize());
rwZeroExtendAvxVec(out->_operands[0], operands[0].as<Vec>()); rwZeroExtendAvxVec(out->_operands[0], operands[0].as<Vec>());
return kErrorOk; return kErrorOk;
} }
if (operands[0].isMem() && BaseReg::isVec(operands[1]) && BaseReg::isVec(operands[2])) { if (operands[0].isMem() && BaseReg::isVec(operands[1]) && BaseReg::isVec(operands[2])) {
out->_operands[0].reset(X | MibRead, operands[1].size()); out->_operands[0].reset(X | MibRead, operands[1].x86RmSize());
out->_operands[1].reset(R, operands[1].size()); out->_operands[1].reset(R, operands[1].x86RmSize());
out->_operands[2].reset(R, operands[2].size()); out->_operands[2].reset(R, operands[2].x86RmSize());
return kErrorOk; return kErrorOk;
} }
} }
@@ -1273,7 +1273,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
// operand, respectively. // operand, respectively.
if (opCount == 2) { if (opCount == 2) {
if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1])) { if (BaseReg::isVec(operands[0]) && BaseReg::isVec(operands[1])) {
uint32_t o0Size = operands[0].size(); uint32_t o0Size = operands[0].x86RmSize();
uint32_t o1Size = o0Size == 16 ? 8 : o0Size; uint32_t o1Size = o0Size == 16 ? 8 : o0Size;
out->_operands[0].reset(W, o0Size); out->_operands[0].reset(W, o0Size);
@@ -1285,7 +1285,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
} }
if (BaseReg::isVec(operands[0]) && operands[1].isMem()) { if (BaseReg::isVec(operands[0]) && operands[1].isMem()) {
uint32_t o0Size = operands[0].size(); uint32_t o0Size = operands[0].x86RmSize();
uint32_t o1Size = o0Size == 16 ? 8 : o0Size; uint32_t o1Size = o0Size == 16 ? 8 : o0Size;
out->_operands[0].reset(W, o0Size); out->_operands[0].reset(W, o0Size);
@@ -1305,7 +1305,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
if (BaseReg::isGp(operands[0]) && BaseReg::isVec(operands[1])) { if (BaseReg::isGp(operands[0]) && BaseReg::isVec(operands[1])) {
out->_operands[0].reset(W, 1); out->_operands[0].reset(W, 1);
out->_operands[0].setExtendByteMask(Support::lsbMask<uint32_t>(nativeGpSize - 1) << 1); out->_operands[0].setExtendByteMask(Support::lsbMask<uint32_t>(nativeGpSize - 1) << 1);
out->_operands[1].reset(R, operands[1].size()); out->_operands[1].reset(R, operands[1].x86RmSize());
return kErrorOk; return kErrorOk;
} }
} }
@@ -1343,7 +1343,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
} }
if (operands[0].isReg() && operands[1].isReg()) { if (operands[0].isReg() && operands[1].isReg()) {
uint32_t size1 = operands[1].size(); uint32_t size1 = operands[1].x86RmSize();
uint32_t size0 = size1 >> shift; uint32_t size0 = size1 >> shift;
out->_operands[0].reset(W, size0); out->_operands[0].reset(W, size0);
@@ -1370,7 +1370,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
} }
if (operands[0].isReg() && operands[1].isMem()) { if (operands[0].isReg() && operands[1].isMem()) {
uint32_t size1 = operands[1].size() ? operands[1].size() : uint32_t(16); uint32_t size1 = operands[1].x86RmSize() ? operands[1].x86RmSize() : uint32_t(16);
uint32_t size0 = size1 >> shift; uint32_t size0 = size1 >> shift;
out->_operands[0].reset(W, size0); out->_operands[0].reset(W, size0);
@@ -1379,7 +1379,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
} }
if (operands[0].isMem() && operands[1].isReg()) { if (operands[0].isMem() && operands[1].isReg()) {
uint32_t size1 = operands[1].size(); uint32_t size1 = operands[1].x86RmSize();
uint32_t size0 = size1 >> shift; uint32_t size0 = size1 >> shift;
out->_operands[0].reset(W | MibRead, size0); out->_operands[0].reset(W | MibRead, size0);
@@ -1420,7 +1420,7 @@ Error InstInternal::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_*
out->_operands[2].reset(); out->_operands[2].reset();
} }
uint32_t size0 = operands[0].size(); uint32_t size0 = operands[0].x86RmSize();
uint32_t size1 = size0 >> shift; uint32_t size1 = size0 >> shift;
out->_operands[0].reset(W, size0); out->_operands[0].reset(W, size0);

View File

@@ -831,6 +831,24 @@ public:
//! \} //! \}
//! \name Memory Size
//! \{
//! Tests whether the memory operand specifies a size (i.e. the size is not zero).
ASMJIT_INLINE_NODEBUG constexpr bool hasSize() const noexcept { return _signature.hasField<Signature::kSizeMask>(); }
//! Tests whether the memory operand size matches size `s`.
ASMJIT_INLINE_NODEBUG constexpr bool hasSize(uint32_t s) const noexcept { return size() == s; }
//! Returns the size of the memory operand in bytes.
//!
//! \note Most instructions would deduce the size of the memory operand, so in most cases it's expected that the
//! returned value would be zero. However, some instructions require the size to select between multiple variations,
//! so in some cases the size is required and would be non-zero (for example `inc [mem], immediate` requires the size
//! to distinguish between 8-bit, 16-bit, 32-bit, and 64-bit increments).
ASMJIT_INLINE_NODEBUG constexpr uint32_t size() const noexcept { return _signature.getField<Signature::kSizeMask>(); }
//! \}
//! \name Address Type //! \name Address Type
//! \{ //! \{