Added parameter pack to function arguments and return values
@@ -281,8 +281,10 @@ public:
   // Let's round the size of `LabelEntry` to 64 bytes (as `ZoneAllocator` has
   // granularity of 32 bytes anyway). This gives `_name` the remaining space,
   // which should be 16 bytes on 64-bit and 28 bytes on 32-bit architectures.
-  static constexpr uint32_t kStaticNameSize =
-    64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*));
+  enum : uint32_t {
+    kStaticNameSize =
+      64 - (sizeof(ZoneHashNode) + 8 + sizeof(Section*) + sizeof(size_t) + sizeof(LabelLink*))
+  };

   //! Label type, see `Label::LabelType`.
   uint8_t _type;

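Note (not part of the commit): the byte arithmetic in the comment above can be sanity-checked. Assuming `sizeof(ZoneHashNode)` is 16 on both 32-bit and 64-bit targets (an assumption, not verified from this diff), the 64-byte budget leaves exactly the 16/28 bytes the comment claims:

#include <cstdint>

// Standalone sketch; `zoneHashNodeSize == 16` is an assumption, and the `8`
// stands for the remaining fixed fields, as in the expression above.
template<unsigned PtrSize>
constexpr uint32_t staticNameSize() {
  constexpr unsigned zoneHashNodeSize = 16;
  return 64 - (zoneHashNodeSize + 8 + PtrSize + PtrSize + PtrSize);
}
static_assert(staticNameSize<8>() == 16, "64-bit: matches the comment");
static_assert(staticNameSize<4>() == 28, "32-bit: matches the comment");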
@@ -59,26 +59,6 @@ class GlobalConstPoolPass : public Pass {
   }
 };

-// ============================================================================
-// [asmjit::InvokeNode - Arg / Ret]
-// ============================================================================
-
-bool InvokeNode::_setArg(uint32_t i, const Operand_& op) noexcept {
-  if ((i & ~kFuncArgHi) >= _funcDetail.argCount())
-    return false;
-
-  _args[i] = op;
-  return true;
-}
-
-bool InvokeNode::_setRet(uint32_t i, const Operand_& op) noexcept {
-  if (i >= 2)
-    return false;
-
-  _rets[i] = op;
-  return true;
-}
-
 // ============================================================================
 // [asmjit::BaseCompiler - Construction / Destruction]
 // ============================================================================
@@ -130,10 +110,10 @@ Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature)
   // Allocate space for function arguments.
   funcNode->_args = nullptr;
   if (funcNode->argCount() != 0) {
-    funcNode->_args = _allocator.allocT<VirtReg*>(funcNode->argCount() * sizeof(VirtReg*));
+    funcNode->_args = _allocator.allocT<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
     if (ASMJIT_UNLIKELY(!funcNode->_args))
       return reportError(DebugUtils::errored(kErrorOutOfMemory));
-    memset(funcNode->_args, 0, funcNode->argCount() * sizeof(VirtReg*));
+    memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
   }

   ASMJIT_PROPAGATE(registerLabelNode(funcNode));
@@ -203,7 +183,7 @@ Error BaseCompiler::endFunc() {
   return kErrorOk;
 }

-Error BaseCompiler::setArg(uint32_t argIndex, const BaseReg& r) {
+Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& r) {
   FuncNode* func = _func;

   if (ASMJIT_UNLIKELY(!func))
@@ -213,7 +193,7 @@ Error BaseCompiler::setArg(uint32_t argIndex, const BaseReg& r) {
     return reportError(DebugUtils::errored(kErrorInvalidVirtId));

   VirtReg* vReg = virtRegByReg(r);
-  func->setArg(argIndex, vReg);
+  func->setArg(argIndex, valueIndex, vReg);

   return kErrorOk;
 }
@@ -237,10 +217,10 @@ Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Oper
   // Skip the allocation if there are no arguments.
   uint32_t argCount = signature.argCount();
   if (argCount) {
-    node->_args = static_cast<Operand*>(_allocator.alloc(argCount * sizeof(Operand)));
+    node->_args = static_cast<InvokeNode::OperandPack*>(_allocator.alloc(argCount * sizeof(InvokeNode::OperandPack)));
     if (!node->_args)
       reportError(DebugUtils::errored(kErrorOutOfMemory));
-    memset(node->_args, 0, argCount * sizeof(Operand));
+    memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
   }

   *out = node;

@@ -269,8 +269,12 @@ public:
   //! Emits a sentinel that marks the end of the current function.
   ASMJIT_API Error endFunc();

+  ASMJIT_API Error _setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg);
+
   //! Sets a function argument at `argIndex` to `reg`.
-  ASMJIT_API Error setArg(uint32_t argIndex, const BaseReg& reg);
+  inline Error setArg(size_t argIndex, const BaseReg& reg) { return _setArg(argIndex, 0, reg); }
+  //! Sets a function argument at `argIndex` and `valueIndex` to `reg`.
+  inline Error setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) { return _setArg(argIndex, valueIndex, reg); }

   inline FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) {
     FuncRetNode* node;
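Note (not part of the commit): a hedged usage sketch of the new `(argIndex, valueIndex)` overloads. It assumes a 32-bit x86 target where a 64-bit argument is modeled as a lo/hi pair of 32-bit virtual registers; the function and its body are illustrative only.

#include <asmjit/x86.h>
using namespace asmjit;

// Sketch: increment a 64-bit argument on a 32-bit target using value packs.
void emitInc64(x86::Compiler& cc) {
  cc.addFunc(FuncSignatureT<uint64_t, uint64_t>(CallConv::kIdCDecl));

  x86::Gp lo = cc.newUInt32("lo");  // value 0 of argument 0 (low half)
  x86::Gp hi = cc.newUInt32("hi");  // value 1 of argument 0 (high half)
  cc.setArg(0, 0, lo);              // argIndex = 0, valueIndex = 0
  cc.setArg(0, 1, hi);              // argIndex = 0, valueIndex = 1

  cc.add(lo, 1);                    // 64-bit increment as an add/adc pair
  cc.adc(hi, 0);
  cc.ret(lo, hi);                   // pair returned in EDX:EAX
  cc.endFunc();
}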
@@ -573,6 +577,19 @@ class FuncNode : public LabelNode {
 public:
   ASMJIT_NONCOPYABLE(FuncNode)

+  //! Arguments pack.
+  struct ArgPack {
+    VirtReg* _data[Globals::kMaxValuePack];
+
+    inline void reset() noexcept {
+      for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
+        _data[valueIndex] = nullptr;
+    }
+
+    inline VirtReg*& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
+    inline VirtReg* const& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
+  };
+
   //! Function detail.
   FuncDetail _funcDetail;
   //! Function frame.
@@ -581,8 +598,9 @@ public:
   LabelNode* _exitNode;
   //! Function end (sentinel).
   SentinelNode* _end;
-  //! Arguments array as `VirtReg`.
-  VirtReg** _args;
+
+  //! Argument packs.
+  ArgPack* _args;

   //! \name Construction & Destruction
   //! \{
@@ -623,30 +641,42 @@ public:
   //! Returns function frame.
   inline const FuncFrame& frame() const noexcept { return _frame; }

   //! Tests whether the function has a return value.
   inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
   //! Returns the number of function arguments.
   inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
   //! Returns the number of function return values.
   inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); }

-  //! Returns arguments list.
-  inline VirtReg** args() const noexcept { return _args; }
+  //! Returns argument packs.
+  inline ArgPack* argPacks() const noexcept { return _args; }

-  //! Returns argument at `i`.
-  inline VirtReg* arg(uint32_t i) const noexcept {
-    ASMJIT_ASSERT(i < argCount());
-    return _args[i];
+  //! Returns argument pack at `argIndex`.
+  inline ArgPack& argPack(size_t argIndex) const noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    return _args[argIndex];
   }

-  //! Sets argument at `i`.
-  inline void setArg(uint32_t i, VirtReg* vReg) noexcept {
-    ASMJIT_ASSERT(i < argCount());
-    _args[i] = vReg;
+  //! Sets argument at `argIndex`.
+  inline void setArg(size_t argIndex, VirtReg* vReg) noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    _args[argIndex][0] = vReg;
   }

-  //! Resets argument at `i`.
-  inline void resetArg(uint32_t i) noexcept {
-    ASMJIT_ASSERT(i < argCount());
-    _args[i] = nullptr;
+  //! Sets argument at `argIndex` and `valueIndex`.
+  inline void setArg(size_t argIndex, size_t valueIndex, VirtReg* vReg) noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    _args[argIndex][valueIndex] = vReg;
   }
+
+  //! Resets argument pack at `argIndex`.
+  inline void resetArg(size_t argIndex) noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    _args[argIndex].reset();
+  }
+
+  //! Resets argument at `argIndex` and `valueIndex`.
+  inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    _args[argIndex][valueIndex] = nullptr;
+  }

   //! Returns function attributes.
@@ -686,12 +716,40 @@ class InvokeNode : public InstNode {
 public:
   ASMJIT_NONCOPYABLE(InvokeNode)

+  //! Operand pack provides multiple operands that can be associated with a
+  //! single return value or function argument. Sometimes this is necessary to
+  //! express an argument or return value that requires multiple registers, for
+  //! example a 64-bit value in 32-bit mode or passing / returning homogeneous
+  //! data structures.
+  struct OperandPack {
+    //! Operands.
+    Operand_ _data[Globals::kMaxValuePack];
+
+    //! Resets the pack by resetting all operands in the pack.
+    inline void reset() noexcept {
+      for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
+        _data[valueIndex].reset();
+    }
+
+    //! Returns an operand at the given `valueIndex`.
+    inline Operand& operator[](size_t valueIndex) noexcept {
+      ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
+      return _data[valueIndex].as<Operand>();
+    }
+
+    //! Returns an operand at the given `valueIndex` (const).
+    inline const Operand& operator[](size_t valueIndex) const noexcept {
+      ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
+      return _data[valueIndex].as<Operand>();
+    }
+  };
+
   //! Function detail.
   FuncDetail _funcDetail;
-  //! Returns.
-  Operand_ _rets[2];
-  //! Arguments.
-  Operand_* _args;
+  //! Function return value(s).
+  OperandPack _rets;
+  //! Function arguments.
+  OperandPack* _args;

   //! \name Construction & Destruction
   //! \{
@@ -703,8 +761,7 @@ public:
     _args(nullptr) {
     setType(kNodeInvoke);
     _resetOps();
-    _rets[0].reset();
-    _rets[1].reset();
+    _rets.reset();
     addFlags(kFlagIsRemovable);
   }

@@ -728,45 +785,63 @@ public:
   //! \overload
   inline const Operand& target() const noexcept { return _opArray[0].as<Operand>(); }

   //! Tests whether the function has a return value.
   inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
   //! Returns the number of function arguments.
   inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
   //! Returns the number of function return values.
   inline uint32_t retCount() const noexcept { return _funcDetail.retCount(); }

-  //! Returns the return value at `i`.
-  inline Operand& ret(uint32_t i = 0) noexcept {
-    ASMJIT_ASSERT(i < 2);
-    return _rets[i].as<Operand>();
-  }
-  //! \overload
-  inline const Operand& ret(uint32_t i = 0) const noexcept {
-    ASMJIT_ASSERT(i < 2);
-    return _rets[i].as<Operand>();
-  }
+  //! Returns operand pack representing function return value(s).
+  inline OperandPack& retPack() noexcept { return _rets; }
+  //! Returns operand pack representing function return value(s) (const).
+  inline const OperandPack& retPack() const noexcept { return _rets; }
+
+  //! Returns the return value at the given `valueIndex`.
+  inline Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
+  //! \overload
+  inline const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
+
+  //! Returns operand pack representing the function argument at `argIndex`.
+  inline OperandPack& argPack(size_t argIndex) noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    return _args[argIndex];
+  }
+  //! \overload
+  inline const OperandPack& argPack(size_t argIndex) const noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    return _args[argIndex];
+  }

-  //! Returns the function argument at `i`.
-  inline Operand& arg(uint32_t i) noexcept {
-    ASMJIT_ASSERT(i < kFuncArgCountLoHi);
-    return _args[i].as<Operand>();
-  }
+  //! Returns a function argument at the given `argIndex` and `valueIndex`.
+  inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    return _args[argIndex][valueIndex];
+  }
   //! \overload
-  inline const Operand& arg(uint32_t i) const noexcept {
-    ASMJIT_ASSERT(i < kFuncArgCountLoHi);
-    return _args[i].as<Operand>();
-  }
+  inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    return _args[argIndex][valueIndex];
+  }

-  //! Sets the function argument at `i` to `op`.
-  ASMJIT_API bool _setArg(uint32_t i, const Operand_& op) noexcept;
-  //! Sets the function return value at `i` to `op`.
-  ASMJIT_API bool _setRet(uint32_t i, const Operand_& op) noexcept;
+  //! Sets the function return value at `valueIndex` to `op`.
+  inline void _setRet(size_t valueIndex, const Operand_& op) noexcept { _rets[valueIndex] = op; }
+  //! Sets the function argument at `argIndex` and `valueIndex` to `op`.
+  inline void _setArg(size_t argIndex, size_t valueIndex, const Operand_& op) noexcept {
+    ASMJIT_ASSERT(argIndex < argCount());
+    _args[argIndex][valueIndex] = op;
+  }

-  //! Sets the function argument at `i` to `reg`.
-  inline bool setArg(uint32_t i, const BaseReg& reg) noexcept { return _setArg(i, reg); }
-  //! Sets the function argument at `i` to `imm`.
-  inline bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); }
-  //! Sets the function return value at `i` to `reg`.
-  inline bool setRet(uint32_t i, const BaseReg& reg) noexcept { return _setRet(i, reg); }
+  //! Sets the function return value at `valueIndex` to `reg`.
+  inline void setRet(size_t valueIndex, const BaseReg& reg) noexcept { _setRet(valueIndex, reg); }
+
+  //! Sets the first function argument in a value-pack at `argIndex` to `reg`.
+  inline void setArg(size_t argIndex, const BaseReg& reg) noexcept { _setArg(argIndex, 0, reg); }
+  //! Sets the first function argument in a value-pack at `argIndex` to `imm`.
+  inline void setArg(size_t argIndex, const Imm& imm) noexcept { _setArg(argIndex, 0, imm); }
+
+  //! Sets the function argument at `argIndex` and `valueIndex` to `reg`.
+  inline void setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) noexcept { _setArg(argIndex, valueIndex, reg); }
+  //! Sets the function argument at `argIndex` and `valueIndex` to `imm`.
+  inline void setArg(size_t argIndex, size_t valueIndex, const Imm& imm) noexcept { _setArg(argIndex, valueIndex, imm); }

   //! \}
 };
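Note (not part of the commit): a hedged sketch of the pack-aware `InvokeNode` setters, again assuming a 32-bit target where a 64-bit return value occupies two 32-bit registers. The call-site setup is illustrative only.

#include <asmjit/x86.h>
using namespace asmjit;

// Sketch: invoking `uint64_t fn(void)` and receiving the result as a lo/hi pair.
void emitCall(x86::Compiler& cc, uint64_t (*fn)(void)) {
  InvokeNode* invokeNode;
  cc.invoke(&invokeNode, imm((uintptr_t)fn), FuncSignatureT<uint64_t>(CallConv::kIdCDecl));

  x86::Gp retLo = cc.newUInt32("retLo");
  x86::Gp retHi = cc.newUInt32("retHi");
  invokeNode->setRet(0, retLo);  // value 0 of the return pack (low half)
  invokeNode->setRet(1, retHi);  // value 1 of the return pack (high half)
}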

@@ -209,6 +209,8 @@ Error formatInstruction(
 }

 #ifndef ASMJIT_NO_BUILDER
+
+#ifndef ASMJIT_NO_COMPILER
 static Error formatFuncValue(String& sb, uint32_t formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
   uint32_t typeId = value.typeId();
   ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
@@ -237,31 +239,49 @@ static Error formatFuncValue(String& sb, uint32_t formatFlags, const BaseEmitter
   return kErrorOk;
 }

+static Error formatFuncValuePack(
+  String& sb,
+  uint32_t formatFlags,
+  const BaseEmitter* emitter,
+  const FuncValuePack& pack,
+  VirtReg* const* vRegs) noexcept {
+
+  size_t count = pack.count();
+  if (!count)
+    return sb.append("void");
+
+  if (count > 1)
+    sb.append('[');
+
+  for (uint32_t valueIndex = 0; valueIndex < count; valueIndex++) {
+    const FuncValue& value = pack[valueIndex];
+    if (!value)
+      break;
+
+    if (valueIndex)
+      ASMJIT_PROPAGATE(sb.append(", "));
+
+    ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, value));
+
+    if (vRegs) {
+      static const char nullRet[] = "<none>";
+      ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[valueIndex] ? vRegs[valueIndex]->name() : nullRet));
+    }
+  }
+
+  if (count > 1)
+    sb.append(']');
+
+  return kErrorOk;
+}
+
 static Error formatFuncRets(
   String& sb,
   uint32_t formatFlags,
   const BaseEmitter* emitter,
-  const FuncDetail& fd,
-  VirtReg* const* vRegs) noexcept {
+  const FuncDetail& fd) noexcept {

   if (!fd.hasRet())
     return sb.append("void");

-  for (uint32_t i = 0; i < fd.retCount(); i++) {
-    if (i) ASMJIT_PROPAGATE(sb.append(", "));
-    ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, fd.ret(i)));
-
-#ifndef ASMJIT_NO_COMPILER
-    if (vRegs) {
-      static const char nullRet[] = "<none>";
-      ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullRet));
-    }
-#else
-    DebugUtils::unused(vRegs);
-#endif
-  }
-
-  return kErrorOk;
+  return formatFuncValuePack(sb, formatFlags, emitter, fd.retPack(), nullptr);
 }

 static Error formatFuncArgs(
@@ -269,30 +289,22 @@ static Error formatFuncArgs(
   uint32_t formatFlags,
   const BaseEmitter* emitter,
   const FuncDetail& fd,
-  VirtReg* const* vRegs) noexcept {
+  const FuncNode::ArgPack* argPacks) noexcept {

-  uint32_t count = fd.argCount();
-  if (!count)
+  uint32_t argCount = fd.argCount();
+  if (!argCount)
     return sb.append("void");

-  for (uint32_t i = 0; i < count; i++) {
-    if (i)
+  for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+    if (argIndex)
       ASMJIT_PROPAGATE(sb.append(", "));

-    ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, fd.arg(i)));
-
-#ifndef ASMJIT_NO_COMPILER
-    if (vRegs) {
-      static const char nullArg[] = "<none>";
-      ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[i] ? vRegs[i]->name() : nullArg));
-    }
-#else
-    DebugUtils::unused(vRegs);
-#endif
+    ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, emitter, fd.argPack(argIndex), argPacks[argIndex]._data));
   }

   return kErrorOk;
 }
 #endif

 Error formatNode(
   String& sb,
@@ -396,9 +408,9 @@ Error formatNode(
       ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, funcNode->labelId()));
       ASMJIT_PROPAGATE(sb.append(": "));

-      ASMJIT_PROPAGATE(formatFuncRets(sb, formatFlags, builder, funcNode->detail(), nullptr));
+      ASMJIT_PROPAGATE(formatFuncRets(sb, formatFlags, builder, funcNode->detail()));
       ASMJIT_PROPAGATE(sb.append(" Func("));
-      ASMJIT_PROPAGATE(formatFuncArgs(sb, formatFlags, builder, funcNode->detail(), funcNode->args()));
+      ASMJIT_PROPAGATE(formatFuncArgs(sb, formatFlags, builder, funcNode->detail(), funcNode->argPacks()));
       ASMJIT_PROPAGATE(sb.append(")"));
       break;
     }

@@ -55,19 +55,17 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
   uint32_t registerSize = Environment::registerSizeFromArch(cc.arch());
   uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);

-  const uint8_t* args = signature.args();
-  for (uint32_t i = 0; i < argCount; i++) {
-    FuncValue& arg = _args[i];
-    arg.initTypeId(Type::deabstract(args[i], deabstractDelta));
+  const uint8_t* signatureArgs = signature.args();
+  for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+    FuncValuePack& argPack = _args[argIndex];
+    argPack[0].initTypeId(Type::deabstract(signatureArgs[argIndex], deabstractDelta));
   }
   _argCount = uint8_t(argCount);
   _vaIndex = uint8_t(signature.vaIndex());

   uint32_t ret = signature.ret();
-  if (ret != Type::kIdVoid) {
+  if (ret != Type::kIdVoid)
     _rets[0].initTypeId(Type::deabstract(ret, deabstractDelta));
-    _retCount = 1;
-  }

 #ifdef ASMJIT_BUILD_X86
   if (environment.isFamilyX86())

@@ -36,31 +36,6 @@ ASMJIT_BEGIN_NAMESPACE
 //! \addtogroup asmjit_function
 //! \{

-// ============================================================================
-// [asmjit::FuncArgIndex]
-// ============================================================================
-
-//! Function argument index (lo/hi).
-enum FuncArgIndex : uint32_t {
-  //! Maximum number of function arguments supported by AsmJit.
-  kFuncArgCount = Globals::kMaxFuncArgs,
-  //! Extended maximum number of arguments (used internally).
-  kFuncArgCountLoHi = kFuncArgCount * 2,
-
-  //! Index to the LO part of function argument (default).
-  //!
-  //! This value is typically omitted and added only if a HI argument is
-  //! accessed.
-  kFuncArgLo = 0,
-
-  //! Index to the HI part of function argument.
-  //!
-  //! HI part of function argument depends on target architecture. On x86 it's
-  //! typically used to transfer 64-bit integers (they form a pair of 32-bit
-  //! integers).
-  kFuncArgHi = kFuncArgCount
-};
-
 // ============================================================================
 // [asmjit::FuncSignature]
 // ============================================================================
@@ -161,7 +136,7 @@ public:
 //! Function signature builder.
 class FuncSignatureBuilder : public FuncSignature {
 public:
-  uint8_t _builderArgList[kFuncArgCount];
+  uint8_t _builderArgList[Globals::kMaxFuncArgs];

   //! \name Initialization & Reset
   //! \{
@@ -192,7 +167,7 @@ public:

   //! Appends an argument of `type` to the function prototype.
   inline void addArg(uint32_t type) noexcept {
-    ASMJIT_ASSERT(_argCount < kFuncArgCount);
+    ASMJIT_ASSERT(_argCount < Globals::kMaxFuncArgs);
     _builderArgList[_argCount++] = uint8_t(type);
   }
   //! Appends an argument of type based on `T` to the function prototype.
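Note (not part of the commit): `FuncSignatureBuilder` usage for reference; a small hedged sketch (the `setRetT`/`addArgT` helpers are assumed from asmjit's public API, they do not appear in this diff):

#include <asmjit/core.h>
using namespace asmjit;

// Sketch: building an `int (int, int)` signature at runtime.
FuncSignatureBuilder makeSignature() {
  FuncSignatureBuilder signature(CallConv::kIdHost);
  signature.setRetT<int>();        // return type
  signature.addArgT<int>();        // argument 0, via template helper
  signature.addArg(Type::kIdI32);  // argument 1, via type-id (shown above)
  return signature;
}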
@@ -206,8 +181,8 @@ public:
 // [asmjit::FuncValue]
 // ============================================================================

-//! Argument or return value as defined by `FuncSignature`, but with register
-//! or stack address (and other metadata) assigned to it.
+//! Argument or return value (or its part) as defined by `FuncSignature`, but
+//! with register or stack address (and other metadata) assigned.
 struct FuncValue {
   uint32_t _data;

@@ -276,6 +251,8 @@ struct FuncValue {
   //! \name Accessors
   //! \{

+  inline explicit operator bool() const noexcept { return _data != 0; }
+
   inline void _replaceValue(uint32_t mask, uint32_t value) noexcept { _data = (_data & ~mask) | value; }

   //! Tests whether the `FuncValue` has a flag `flag` set.
@@ -324,6 +301,72 @@ struct FuncValue {
   //! \}
 };

+// ============================================================================
+// [asmjit::FuncValuePack]
+// ============================================================================
+
+//! Contains multiple `FuncValue` instances in an array so functions that use
+//! multiple registers for arguments or return values can represent all inputs
+//! and outputs.
+struct FuncValuePack {
+public:
+  //! Values data.
+  FuncValue _values[Globals::kMaxValuePack];
+
+  inline void reset() noexcept {
+    for (size_t i = 0; i < Globals::kMaxValuePack; i++)
+      _values[i].reset();
+  }
+
+  //! Calculates how many values are in the pack, skipping unused values at
+  //! the end.
+  inline uint32_t count() const noexcept {
+    uint32_t n = Globals::kMaxValuePack;
+    while (n && !_values[n - 1])
+      n--;
+    return n;
+  }
+
+  inline FuncValue* values() noexcept { return _values; }
+  inline const FuncValue* values() const noexcept { return _values; }
+
+  inline void resetValue(size_t index) noexcept {
+    ASMJIT_ASSERT(index < Globals::kMaxValuePack);
+    _values[index].reset();
+  }
+
+  inline bool hasValue(size_t index) noexcept {
+    ASMJIT_ASSERT(index < Globals::kMaxValuePack);
+    return _values[index].isInitialized();
+  }
+
+  inline void assignReg(size_t index, const BaseReg& reg, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(index < Globals::kMaxValuePack);
+    ASMJIT_ASSERT(reg.isPhysReg());
+    _values[index].initReg(reg.type(), reg.id(), typeId);
+  }
+
+  inline void assignReg(size_t index, uint32_t regType, uint32_t regId, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(index < Globals::kMaxValuePack);
+    _values[index].initReg(regType, regId, typeId);
+  }
+
+  inline void assignStack(size_t index, int32_t offset, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(index < Globals::kMaxValuePack);
+    _values[index].initStack(offset, typeId);
+  }
+
+  inline FuncValue& operator[](size_t index) {
+    ASMJIT_ASSERT(index < Globals::kMaxValuePack);
+    return _values[index];
+  }
+
+  inline const FuncValue& operator[](size_t index) const {
+    ASMJIT_ASSERT(index < Globals::kMaxValuePack);
+    return _values[index];
+  }
+};
+
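Note (not part of the commit): `count()` trims uninitialized values from the end only, so it returns the index of the last initialized value plus one. A standalone hedged sketch:

#include <asmjit/x86.h>
using namespace asmjit;

void valuePackCountDemo() {
  FuncValuePack pack;
  pack.reset();
  pack.assignReg(0, x86::eax);  // value 0 initialized
  pack.assignReg(1, x86::edx);  // value 1 initialized
  // pack.count() == 2: values 2 and 3 are uninitialized and trimmed.
  // Had only value 1 been assigned, count() would still return 2, because
  // the backward scan stops at the last initialized value.
}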
 // ============================================================================
 // [asmjit::FuncDetail]
 // ============================================================================
@@ -339,20 +382,18 @@ public:
   CallConv _callConv;
   //! Number of function arguments.
   uint8_t _argCount;
-  //! Number of function return values.
-  uint8_t _retCount;
   //! Variable arguments index or `kNoVarArgs`.
   uint8_t _vaIndex;
   //! Reserved for future use.
-  uint8_t _reserved;
+  uint16_t _reserved;
   //! Registers that contain arguments.
   uint32_t _usedRegs[BaseReg::kGroupVirt];
   //! Size of arguments passed by stack.
   uint32_t _argStackSize;
-  //! Function return values.
-  FuncValue _rets[2];
+  //! Function return value(s).
+  FuncValuePack _rets;
   //! Function arguments.
-  FuncValue _args[kFuncArgCountLoHi];
+  FuncValuePack _args[Globals::kMaxFuncArgs];

   enum : uint8_t {
     //! Doesn't have variable number of arguments (`...`).
@@ -382,52 +423,61 @@ public:
   //! Checks whether a CallConv `flag` is set, see `CallConv::Flags`.
   inline bool hasFlag(uint32_t ccFlag) const noexcept { return _callConv.hasFlag(ccFlag); }

-  //! Returns count of function return values.
-  inline uint32_t retCount() const noexcept { return _retCount; }
+  //! Tests whether the function has a return value.
+  inline bool hasRet() const noexcept { return bool(_rets[0]); }
   //! Returns the number of function arguments.
   inline uint32_t argCount() const noexcept { return _argCount; }

-  //! Tests whether the function has a return value.
-  inline bool hasRet() const noexcept { return _retCount != 0; }
-  //! Returns function return value associated with the given `index`.
-  inline FuncValue& ret(uint32_t index = 0) noexcept {
-    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
-    return _rets[index];
-  }
-  //! Returns function return value associated with the given `index` (const).
-  inline const FuncValue& ret(uint32_t index = 0) const noexcept {
-    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_rets));
-    return _rets[index];
-  }
+  //! Returns function return values.
+  inline FuncValuePack& retPack() noexcept { return _rets; }
+  //! Returns function return values (const).
+  inline const FuncValuePack& retPack() const noexcept { return _rets; }
+
+  //! Returns a function return value associated with the given `valueIndex`.
+  inline FuncValue& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
+  //! Returns a function return value associated with the given `valueIndex` (const).
+  inline const FuncValue& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
+
+  //! Returns function argument packs array.
+  inline FuncValuePack* argPacks() noexcept { return _args; }
+  //! Returns function argument packs array (const).
+  inline const FuncValuePack* argPacks() const noexcept { return _args; }
+
+  //! Returns function argument pack at the given `argIndex`.
+  inline FuncValuePack& argPack(size_t argIndex) noexcept {
+    ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
+    return _args[argIndex];
+  }
+  //! Returns function argument pack at the given `argIndex` (const).
+  inline const FuncValuePack& argPack(size_t argIndex) const noexcept {
+    ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
+    return _args[argIndex];
+  }

-  //! Returns function arguments array.
-  inline FuncValue* args() noexcept { return _args; }
-  //! Returns function arguments array (const).
-  inline const FuncValue* args() const noexcept { return _args; }
-
-  inline bool hasArg(uint32_t index) const noexcept {
-    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
-    return _args[index].isInitialized();
-  }
-
-  //! Returns function argument at the given `index`.
-  inline FuncValue& arg(uint32_t index) noexcept {
-    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
-    return _args[index];
-  }
-  //! Returns function argument at the given `index` (const).
-  inline const FuncValue& arg(uint32_t index) const noexcept {
-    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
-    return _args[index];
-  }
-
-  inline void resetArg(uint32_t index) noexcept {
-    ASMJIT_ASSERT(index < kFuncArgCountLoHi);
-    _args[index].reset();
-  }
+  //! Returns an argument at `valueIndex` from the argument pack at the given `argIndex`.
+  inline FuncValue& arg(size_t argIndex, size_t valueIndex = 0) noexcept {
+    ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
+    return _args[argIndex][valueIndex];
+  }
+  //! Returns an argument at `valueIndex` from the argument pack at the given `argIndex` (const).
+  inline const FuncValue& arg(size_t argIndex, size_t valueIndex = 0) const noexcept {
+    ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
+    return _args[argIndex][valueIndex];
+  }
+
+  //! Resets an argument at the given `argIndex`.
+  //!
+  //! If the argument is a parameter pack (has multiple values) all values are reset.
+  inline void resetArg(size_t argIndex) noexcept {
+    ASMJIT_ASSERT(argIndex < Globals::kMaxFuncArgs);
+    _args[argIndex].reset();
+  }

   //! Tests whether the function has variable arguments.
   inline bool hasVarArgs() const noexcept { return _vaIndex != kNoVarArgs; }
   //! Returns the index of the first variable argument.
   inline uint32_t vaIndex() const noexcept { return _vaIndex; }

   //! Tests whether the function passes one or more argument by stack.
@@ -435,18 +485,25 @@ public:
   //! Returns stack size needed for function arguments passed on the stack.
   inline uint32_t argStackSize() const noexcept { return _argStackSize; }

+  //! Returns red zone size.
+  inline uint32_t redZoneSize() const noexcept { return _callConv.redZoneSize(); }
+  //! Returns spill zone size.
+  inline uint32_t spillZoneSize() const noexcept { return _callConv.spillZoneSize(); }
+  //! Returns natural stack alignment.
+  inline uint32_t naturalStackAlignment() const noexcept { return _callConv.naturalStackAlignment(); }
+
   //! Returns a mask of all passed registers of the given register `group`.
   inline uint32_t passedRegs(uint32_t group) const noexcept { return _callConv.passedRegs(group); }
   //! Returns a mask of all preserved registers of the given register `group`.
   inline uint32_t preservedRegs(uint32_t group) const noexcept { return _callConv.preservedRegs(group); }

   //! Returns a mask of all used registers of the given register `group`.
   inline uint32_t usedRegs(uint32_t group) const noexcept {
     ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
     return _usedRegs[group];
   }

   //! Adds `regs` to the mask of used registers of the given register `group`.
   inline void addUsedRegs(uint32_t group, uint32_t regs) noexcept {
     ASMJIT_ASSERT(group < BaseReg::kGroupVirt);
     _usedRegs[group] |= regs;
@@ -873,7 +930,7 @@ public:
   //! Reserved for future use.
   uint8_t _reserved[3];
   //! Mapping of each function argument.
-  FuncValue _args[kFuncArgCountLoHi];
+  FuncValuePack _argPacks[Globals::kMaxFuncArgs];

   //! \name Construction & Destruction
   //! \{
@@ -888,7 +945,7 @@ public:
     _funcDetail = fd;
     _saRegId = uint8_t(BaseReg::kIdBad);
     memset(_reserved, 0, sizeof(_reserved));
-    memset(_args, 0, sizeof(_args));
+    memset(_argPacks, 0, sizeof(_argPacks));
   }

   //! \}
@@ -904,46 +961,62 @@ public:
   inline void setSARegId(uint32_t regId) { _saRegId = uint8_t(regId); }
   inline void resetSARegId() { _saRegId = uint8_t(BaseReg::kIdBad); }

-  inline FuncValue& arg(uint32_t index) noexcept {
-    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args));
-    return _args[index];
+  inline FuncValue& arg(size_t argIndex, size_t valueIndex) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    return _argPacks[argIndex][valueIndex];
   }
-  inline const FuncValue& arg(uint32_t index) const noexcept {
-    ASMJIT_ASSERT(index < ASMJIT_ARRAY_SIZE(_args));
-    return _args[index];
+  inline const FuncValue& arg(size_t argIndex, size_t valueIndex) const noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    return _argPacks[argIndex][valueIndex];
   }

-  inline bool isAssigned(uint32_t argIndex) const noexcept {
-    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
-    return _args[argIndex].isAssigned();
+  inline bool isAssigned(size_t argIndex, size_t valueIndex) const noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    return _argPacks[argIndex][valueIndex].isAssigned();
   }

-  inline void assignReg(uint32_t argIndex, const BaseReg& reg, uint32_t typeId = Type::kIdVoid) noexcept {
-    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
+  inline void assignReg(size_t argIndex, const BaseReg& reg, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
     ASMJIT_ASSERT(reg.isPhysReg());
-    _args[argIndex].initReg(reg.type(), reg.id(), typeId);
+    _argPacks[argIndex][0].initReg(reg.type(), reg.id(), typeId);
   }

-  inline void assignReg(uint32_t argIndex, uint32_t regType, uint32_t regId, uint32_t typeId = Type::kIdVoid) noexcept {
-    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
-    _args[argIndex].initReg(regType, regId, typeId);
+  inline void assignReg(size_t argIndex, uint32_t regType, uint32_t regId, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    _argPacks[argIndex][0].initReg(regType, regId, typeId);
   }

-  inline void assignStack(uint32_t argIndex, int32_t offset, uint32_t typeId = Type::kIdVoid) {
-    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_args));
-    _args[argIndex].initStack(offset, typeId);
+  inline void assignStack(size_t argIndex, int32_t offset, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    _argPacks[argIndex][0].initStack(offset, typeId);
   }

+  inline void assignRegInPack(size_t argIndex, size_t valueIndex, const BaseReg& reg, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    ASMJIT_ASSERT(reg.isPhysReg());
+    _argPacks[argIndex][valueIndex].initReg(reg.type(), reg.id(), typeId);
+  }
+
+  inline void assignRegInPack(size_t argIndex, size_t valueIndex, uint32_t regType, uint32_t regId, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    _argPacks[argIndex][valueIndex].initReg(regType, regId, typeId);
+  }
+
+  inline void assignStackInPack(size_t argIndex, size_t valueIndex, int32_t offset, uint32_t typeId = Type::kIdVoid) noexcept {
+    ASMJIT_ASSERT(argIndex < ASMJIT_ARRAY_SIZE(_argPacks));
+    _argPacks[argIndex][valueIndex].initStack(offset, typeId);
+  }
+
   // NOTE: All `assignAll()` methods are shortcuts to assign all arguments at
   // once, however, since registers are passed all at once these initializers
   // don't provide any way to pass TypeId and/or to keep any argument between
   // the arguments passed unassigned.
-  inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg) noexcept {
+  inline void _assignAllInternal(size_t argIndex, const BaseReg& reg) noexcept {
     assignReg(argIndex, reg);
   }

   template<typename... Args>
-  inline void _assignAllInternal(uint32_t argIndex, const BaseReg& reg, Args&&... args) noexcept {
+  inline void _assignAllInternal(size_t argIndex, const BaseReg& reg, Args&&... args) noexcept {
     assignReg(argIndex, reg);
     _assignAllInternal(argIndex + 1, std::forward<Args>(args)...);
   }

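Note (not part of the commit): the recursive `_assignAllInternal` pair above is a classic variadic unroll behind the `assignAll(...)` entry point that the NOTE in the code refers to; a hedged usage sketch:

#include <asmjit/x86.h>
using namespace asmjit;

// Sketch: assign incoming physical registers to arguments 0..2 in one call.
void assignThreeGpArgs(FuncArgsAssignment& args) {
  // Equivalent to assignReg(0, rdi), assignReg(1, rsi), assignReg(2, rdx).
  args.assignAll(x86::rdi, x86::rsi, x86::rdx);
}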
@@ -80,13 +80,13 @@ namespace Globals {
 // ============================================================================

 //! Host memory allocator overhead.
-constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4);
+static constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4);

 //! Host memory allocator alignment.
-constexpr uint32_t kAllocAlignment = 8;
+static constexpr uint32_t kAllocAlignment = 8;

 //! Aggressive growing strategy threshold.
-constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;
+static constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;

 //! Maximum depth of RB-Tree is:
 //!
@@ -99,37 +99,41 @@ constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;
 //!
 //! Which yields 30 on 32-bit arch and 61 on 64-bit arch.
 //! The final value was adjusted by +1 for safety reasons.
-constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
+static constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;

 //! Maximum number of operands per a single instruction.
-constexpr uint32_t kMaxOpCount = 6;
+static constexpr uint32_t kMaxOpCount = 6;

 //! Maximum arguments of a function supported by the Compiler / Function API.
-constexpr uint32_t kMaxFuncArgs = 16;
+static constexpr uint32_t kMaxFuncArgs = 16;
+
+//! The number of values that can be assigned to a single function argument or
+//! return value.
+static constexpr uint32_t kMaxValuePack = 4;

 //! Maximum number of physical registers AsmJit can use per register group.
-constexpr uint32_t kMaxPhysRegs = 32;
+static constexpr uint32_t kMaxPhysRegs = 32;

 //! Maximum alignment.
-constexpr uint32_t kMaxAlignment = 64;
+static constexpr uint32_t kMaxAlignment = 64;

 //! Maximum label or symbol size in bytes.
-constexpr uint32_t kMaxLabelNameSize = 2048;
+static constexpr uint32_t kMaxLabelNameSize = 2048;

 //! Maximum section name size.
-constexpr uint32_t kMaxSectionNameSize = 35;
+static constexpr uint32_t kMaxSectionNameSize = 35;

 //! Maximum size of comment.
-constexpr uint32_t kMaxCommentSize = 1024;
+static constexpr uint32_t kMaxCommentSize = 1024;

 //! Invalid identifier.
-constexpr uint32_t kInvalidId = 0xFFFFFFFFu;
+static constexpr uint32_t kInvalidId = 0xFFFFFFFFu;

 //! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size.
-constexpr uint32_t kNotFound = 0xFFFFFFFFu;
+static constexpr uint32_t kNotFound = 0xFFFFFFFFu;

 //! Invalid base address.
-constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
+static constexpr uint64_t kNoBaseAddress = ~uint64_t(0);

 // ============================================================================
 // [asmjit::Globals::ResetPolicy]

@@ -957,6 +957,8 @@ public:

   //! Argument index (or `kNoArgIndex` if none).
   uint8_t _argIndex;
+  //! Argument value index in the pack (0 by default).
+  uint8_t _argValueIndex;
   //! Global home register ID (if any, assigned by RA).
   uint8_t _homeRegId;
   //! Global hint register ID (provided by RA or user).
@@ -1006,6 +1008,7 @@ public:
     _clobberSurvivalMask(0),
     _regByteMask(0),
     _argIndex(kNoArgIndex),
+    _argValueIndex(0),
     _homeRegId(BaseReg::kIdBad),
     _hintRegId(BaseReg::kIdBad),
     _liveSpans(),
@@ -1060,7 +1063,12 @@ public:

   inline bool hasArgIndex() const noexcept { return _argIndex != kNoArgIndex; }
   inline uint32_t argIndex() const noexcept { return _argIndex; }
-  inline void setArgIndex(uint32_t index) noexcept { _argIndex = uint8_t(index); }
+  inline uint32_t argValueIndex() const noexcept { return _argValueIndex; }
+
+  inline void setArgIndex(uint32_t argIndex, uint32_t valueIndex) noexcept {
+    _argIndex = uint8_t(argIndex);
+    _argValueIndex = uint8_t(valueIndex);
+  }

   inline bool hasHomeRegId() const noexcept { return _homeRegId != BaseReg::kIdBad; }
   inline uint32_t homeRegId() const noexcept { return _homeRegId; }

@@ -80,55 +80,59 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
   uint32_t numIter = 1;

   for (uint32_t iter = 0; iter < numIter; iter++) {
-    for (uint32_t i = 0; i < argCount; i++) {
-      // Unassigned argument.
-      VirtReg* virtReg = func->arg(i);
-      if (!virtReg) continue;
-
-      // Unreferenced argument.
-      RAWorkReg* workReg = virtReg->workReg();
-      if (!workReg) continue;
-
-      // Overwritten argument.
-      uint32_t workId = workReg->workId();
-      if (!liveIn.bitAt(workId))
-        continue;
-
-      uint32_t group = workReg->group();
-      if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone)
-        continue;
-
-      uint32_t allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
-      if (iter == 0) {
-        // First iteration: Try to allocate to home RegId.
-        if (workReg->hasHomeRegId()) {
-          uint32_t physId = workReg->homeRegId();
-          if (Support::bitTest(allocableRegs, physId)) {
-            _curAssignment.assign(group, workId, physId, true);
-            _pass->_argsAssignment.assignReg(i, workReg->info().type(), physId, workReg->typeId());
-            continue;
-          }
-        }
-
-        numIter = 2;
-      }
-      else {
-        // Second iteration: Pick any other register if there is an unassigned one or assign to stack.
-        if (allocableRegs) {
-          uint32_t physId = Support::ctz(allocableRegs);
-          _curAssignment.assign(group, workId, physId, true);
-          _pass->_argsAssignment.assignReg(i, workReg->info().type(), physId, workReg->typeId());
-        }
-        else {
-          // This register will definitely need stack, create the slot now and assign also `argIndex`
-          // to it. We will patch `_argsAssignment` later after RAStackAllocator finishes.
-          RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg);
-          if (ASMJIT_UNLIKELY(!slot))
-            return DebugUtils::errored(kErrorOutOfMemory);
-
-          // This means STACK_ARG may be moved to STACK.
-          workReg->addFlags(RAWorkReg::kFlagStackArgToStack);
-          _pass->_numStackArgsToStackSlots++;
-        }
-      }
-    }
+    for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+      for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
+        // Unassigned argument.
+        VirtReg* virtReg = func->argPack(argIndex)[valueIndex];
+        if (!virtReg)
+          continue;
+
+        // Unreferenced argument.
+        RAWorkReg* workReg = virtReg->workReg();
+        if (!workReg)
+          continue;
+
+        // Overwritten argument.
+        uint32_t workId = workReg->workId();
+        if (!liveIn.bitAt(workId))
+          continue;
+
+        uint32_t group = workReg->group();
+        if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone)
+          continue;
+
+        uint32_t allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
+        if (iter == 0) {
+          // First iteration: Try to allocate to home RegId.
+          if (workReg->hasHomeRegId()) {
+            uint32_t physId = workReg->homeRegId();
+            if (Support::bitTest(allocableRegs, physId)) {
+              _curAssignment.assign(group, workId, physId, true);
+              _pass->_argsAssignment.assignRegInPack(argIndex, valueIndex, workReg->info().type(), physId, workReg->typeId());
+              continue;
+            }
+          }
+
+          numIter = 2;
+        }
+        else {
+          // Second iteration: Pick any other register if there is an unassigned one or assign to stack.
+          if (allocableRegs) {
+            uint32_t physId = Support::ctz(allocableRegs);
+            _curAssignment.assign(group, workId, physId, true);
+            _pass->_argsAssignment.assignRegInPack(argIndex, valueIndex, workReg->info().type(), physId, workReg->typeId());
+          }
+          else {
+            // This register will definitely need stack, create the slot now and assign also `argIndex`
+            // to it. We will patch `_argsAssignment` later after RAStackAllocator finishes.
+            RAStackSlot* slot = _pass->getOrCreateStackSlot(workReg);
+            if (ASMJIT_UNLIKELY(!slot))
+              return DebugUtils::errored(kErrorOutOfMemory);
+
+            // This means STACK_ARG may be moved to STACK.
+            workReg->addFlags(RAWorkReg::kFlagStackArgToStack);
+            _pass->_numStackArgsToStackSlots++;
+          }
+        }
+      }
+    }
   }

@@ -1110,25 +1110,29 @@ Error RAPass::assignArgIndexToWorkRegs() noexcept {
   ZoneBitVector& liveIn = entryBlock()->liveIn();
   uint32_t argCount = func()->argCount();

-  for (uint32_t i = 0; i < argCount; i++) {
-    // Unassigned argument.
-    VirtReg* virtReg = func()->arg(i);
-    if (!virtReg) continue;
-
-    // Unreferenced argument.
-    RAWorkReg* workReg = virtReg->workReg();
-    if (!workReg) continue;
-
-    // Overwritten argument.
-    uint32_t workId = workReg->workId();
-    if (!liveIn.bitAt(workId))
-      continue;
-
-    workReg->setArgIndex(i);
-    const FuncValue& arg = func()->detail().arg(i);
-
-    if (arg.isReg() && _archRegsInfo->regInfo[arg.regType()].group() == workReg->group()) {
-      workReg->setHintRegId(arg.regId());
+  for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
+    for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
+      // Unassigned argument.
+      VirtReg* virtReg = func()->argPack(argIndex)[valueIndex];
+      if (!virtReg)
+        continue;
+
+      // Unreferenced argument.
+      RAWorkReg* workReg = virtReg->workReg();
+      if (!workReg)
+        continue;
+
+      // Overwritten argument.
+      uint32_t workId = workReg->workId();
+      if (!liveIn.bitAt(workId))
+        continue;
+
+      workReg->setArgIndex(argIndex, valueIndex);
+      const FuncValue& arg = func()->detail().arg(argIndex, valueIndex);
+
+      if (arg.isReg() && _archRegsInfo->regInfo[arg.regType()].group() == workReg->group()) {
+        workReg->setHintRegId(arg.regId());
+      }
     }
   }

@@ -1695,7 +1699,7 @@ Error RAPass::_markStackArgsToKeep() noexcept {
       // NOTE: Update StackOffset here so when `_argsAssignment.updateFuncFrame()`
      // is called it will take into consideration moving to stack slots. Without
       // this we may miss some scratch registers later.
-      FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex());
+      FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex(), workReg->argValueIndex());
       dstArg.assignStackOffset(0);
     }
   }
@@ -1728,7 +1732,7 @@ Error RAPass::_updateStackArgs() noexcept {
       }
     }
     else {
-      FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex());
+      FuncValue& dstArg = _argsAssignment.arg(workReg->argIndex(), workReg->argValueIndex());
       dstArg.setStackOffset(slot->offset());
     }
   }

@@ -67,76 +67,109 @@ static inline uint32_t x86KmovFromSize(uint32_t size) noexcept {
 // [asmjit::X86Internal - FuncDetail]
 // ============================================================================

+ASMJIT_FAVOR_SIZE void unpackValues(FuncDetail& func, FuncValuePack& pack) noexcept {
+  uint32_t typeId = pack[0].typeId();
+  switch (typeId) {
+    case Type::kIdI64:
+    case Type::kIdU64: {
+      if (Environment::is32Bit(func.callConv().arch())) {
+        // Convert a 64-bit return value to two 32-bit return values.
+        pack[0].initTypeId(Type::kIdU32);
+        pack[1].initTypeId(typeId - 2);
+        break;
+      }
+      break;
+    }
+  }
+}
+
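Note (not part of the commit): `unpackValues` turns one 64-bit value into a lo/hi pair on 32-bit targets. `typeId - 2` relies on the `Type` id ordering where `kIdI64`/`kIdU64` sit exactly two slots above `kIdI32`/`kIdU32`, so the high half keeps the signedness while the low half becomes unsigned (`kIdU32`). The split it models, as plain arithmetic:

#include <cstdint>

// Sketch of the lo/hi decomposition represented by the value pack on a
// 32-bit target (EDX:EAX = hi:lo for 64-bit integer returns).
struct Split64 { uint32_t lo; int32_t hi; };

inline Split64 split64(int64_t v) {
  return { uint32_t(v),         // low 32 bits, always unsigned
           int32_t(v >> 32) };  // high 32 bits, keeps the sign
}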
 ASMJIT_FAVOR_SIZE Error X86Internal::initFuncDetail(FuncDetail& func, const FuncSignature& signature, uint32_t registerSize) noexcept {
   const CallConv& cc = func.callConv();
   uint32_t arch = cc.arch();
   uint32_t stackOffset = cc._spillZoneSize;

-  uint32_t i;
   uint32_t argCount = func.argCount();

-  if (func.retCount() != 0) {
-    uint32_t typeId = func._rets[0].typeId();
-    switch (typeId) {
-      case Type::kIdI64:
-      case Type::kIdU64: {
-        if (Environment::is32Bit(arch)) {
-          // Convert a 64-bit return value to two 32-bit return values.
-          func._retCount = 2;
-          typeId -= 2;
-
-          // 64-bit value is returned in EDX:EAX on X86.
-          func._rets[0].initReg(Reg::kTypeGpd, Gp::kIdAx, typeId);
-          func._rets[1].initReg(Reg::kTypeGpd, Gp::kIdDx, typeId);
-          break;
-        }
-        else {
-          func._rets[0].initReg(Reg::kTypeGpq, Gp::kIdAx, typeId);
-        }
-        break;
-      }
-
-      case Type::kIdI8:
-      case Type::kIdI16:
-      case Type::kIdI32: {
-        func._rets[0].initReg(Reg::kTypeGpd, Gp::kIdAx, Type::kIdI32);
-        break;
-      }
-
-      case Type::kIdU8:
-      case Type::kIdU16:
-      case Type::kIdU32: {
-        func._rets[0].initReg(Reg::kTypeGpd, Gp::kIdAx, Type::kIdU32);
-        break;
-      }
-
-      case Type::kIdF32:
-      case Type::kIdF64: {
-        uint32_t regType = Environment::is32Bit(arch) ? Reg::kTypeSt : Reg::kTypeXmm;
-        func._rets[0].initReg(regType, 0, typeId);
-        break;
-      }
-
-      case Type::kIdF80: {
-        // 80-bit floats are always returned by FP0.
-        func._rets[0].initReg(Reg::kTypeSt, 0, typeId);
-        break;
-      }
-
-      case Type::kIdMmx32:
-      case Type::kIdMmx64: {
-        // MM registers are returned through XMM (SystemV) or GPQ (Win64).
-        uint32_t regType = Reg::kTypeMm;
-        if (Environment::is64Bit(arch))
-          regType = cc.strategy() == CallConv::kStrategyDefault ? Reg::kTypeXmm : Reg::kTypeGpq;
-
-        func._rets[0].initReg(regType, 0, typeId);
-        break;
-      }
-
-      default: {
-        func._rets[0].initReg(x86VecTypeIdToRegType(typeId), 0, typeId);
-        break;
-      }
-    }
-  }
+  // Up to two return values can be returned in GP registers.
+  static const uint8_t gpReturnIndexes[4] = {
+    uint8_t(Gp::kIdAx),
+    uint8_t(Gp::kIdDx),
+    uint8_t(BaseReg::kIdBad),
+    uint8_t(BaseReg::kIdBad)
+  };
+
+  if (func.hasRet()) {
+    unpackValues(func, func._rets);
+    for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
+      uint32_t typeId = func._rets[valueIndex].typeId();
+
+      // Terminate at the first void type (end of the pack).
+      if (!typeId)
+        break;
+
+      switch (typeId) {
+        case Type::kIdI64:
+        case Type::kIdU64: {
+          if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad)
+            func._rets[valueIndex].initReg(Reg::kTypeGpq, gpReturnIndexes[valueIndex], typeId);
+          else
+            return DebugUtils::errored(kErrorInvalidState);
+          break;
+        }
+
+        case Type::kIdI8:
+        case Type::kIdI16:
+        case Type::kIdI32: {
+          if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad)
+            func._rets[valueIndex].initReg(Reg::kTypeGpd, gpReturnIndexes[valueIndex], Type::kIdI32);
+          else
+            return DebugUtils::errored(kErrorInvalidState);
+          break;
+        }
+
+        case Type::kIdU8:
+        case Type::kIdU16:
+        case Type::kIdU32: {
+          if (gpReturnIndexes[valueIndex] != BaseReg::kIdBad)
+            func._rets[valueIndex].initReg(Reg::kTypeGpd, gpReturnIndexes[valueIndex], Type::kIdU32);
+          else
+            return DebugUtils::errored(kErrorInvalidState);
+          break;
+        }
+
+        case Type::kIdF32:
+        case Type::kIdF64: {
+          uint32_t regType = Environment::is32Bit(arch) ? Reg::kTypeSt : Reg::kTypeXmm;
+          func._rets[valueIndex].initReg(regType, valueIndex, typeId);
+          break;
+        }
+
+        case Type::kIdF80: {
+          // 80-bit floats are always returned by FP0.
+          func._rets[valueIndex].initReg(Reg::kTypeSt, valueIndex, typeId);
+          break;
+        }
+
+        case Type::kIdMmx32:
+        case Type::kIdMmx64: {
+          // MM registers are returned through XMM (SystemV) or GPQ (Win64).
+          uint32_t regType = Reg::kTypeMm;
+          uint32_t regIndex = valueIndex;
+          if (Environment::is64Bit(arch)) {
+            regType = cc.strategy() == CallConv::kStrategyDefault ? Reg::kTypeXmm : Reg::kTypeGpq;
+            regIndex = cc.strategy() == CallConv::kStrategyDefault ? valueIndex : gpReturnIndexes[valueIndex];
+            if (regIndex == BaseReg::kIdBad)
+              return DebugUtils::errored(kErrorInvalidState);
+          }
+
+          func._rets[valueIndex].initReg(regType, regIndex, typeId);
+          break;
+        }
+
+        default: {
+          func._rets[valueIndex].initReg(x86VecTypeIdToRegType(typeId), valueIndex, typeId);
+          break;
+        }
+      }
+    }
+  }
@@ -146,62 +179,71 @@ ASMJIT_FAVOR_SIZE Error X86Internal::initFuncDetail(FuncDetail& func, const Func
uint32_t gpzPos = 0;
uint32_t vecPos = 0;

for (i = 0; i < argCount; i++) {
FuncValue& arg = func._args[i];
uint32_t typeId = arg.typeId();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
unpackValues(func, func._args[argIndex]);

if (Type::isInt(typeId)) {
uint32_t regId = BaseReg::kIdBad;
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
FuncValue& arg = func._args[argIndex][valueIndex];

if (gpzPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupGp].id[gpzPos];
// Terminate if there are no more arguments in the pack.
if (!arg)
break;

if (regId != BaseReg::kIdBad) {
uint32_t regType = (typeId <= Type::kIdU32) ? Reg::kTypeGpd : Reg::kTypeGpq;
arg.assignRegData(regType, regId);
func.addUsedRegs(Reg::kGroupGp, Support::bitMask(regId));
gpzPos++;
}
else {
uint32_t size = Support::max<uint32_t>(Type::sizeOf(typeId), registerSize);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
uint32_t typeId = arg.typeId();

if (Type::isFloat(typeId) || Type::isVec(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (Type::isInt(typeId)) {
uint32_t regId = BaseReg::kIdBad;

if (vecPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupVec].id[vecPos];
if (gpzPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupGp].id[gpzPos];

if (Type::isFloat(typeId)) {
// If this is a float, but `kFlagPassFloatsByVec` is false, we have
// to use stack instead. This should only be used by 32-bit calling
// conventions.
if (!cc.hasFlag(CallConv::kFlagPassFloatsByVec))
regId = BaseReg::kIdBad;
}
else {
// Pass vector registers via stack if this is a variable arguments
// function. This should only be used by 32-bit calling conventions.
if (signature.hasVarArgs() && cc.hasFlag(CallConv::kFlagPassVecByStackIfVA))
regId = BaseReg::kIdBad;
if (regId != BaseReg::kIdBad) {
uint32_t regType = (typeId <= Type::kIdU32) ? Reg::kTypeGpd : Reg::kTypeGpq;
arg.assignRegData(regType, regId);
func.addUsedRegs(Reg::kGroupGp, Support::bitMask(regId));
gpzPos++;
}
else {
uint32_t size = Support::max<uint32_t>(Type::sizeOf(typeId), registerSize);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}

if (regId != BaseReg::kIdBad) {
arg.initTypeId(typeId);
arg.assignRegData(x86VecTypeIdToRegType(typeId), regId);
func.addUsedRegs(Reg::kGroupVec, Support::bitMask(regId));
vecPos++;
if (Type::isFloat(typeId) || Type::isVec(typeId)) {
uint32_t regId = BaseReg::kIdBad;

if (vecPos < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupVec].id[vecPos];

if (Type::isFloat(typeId)) {
// If this is a float, but `kFlagPassFloatsByVec` is false, we have
// to use stack instead. This should only be used by 32-bit calling
// conventions.
if (!cc.hasFlag(CallConv::kFlagPassFloatsByVec))
regId = BaseReg::kIdBad;
}
else {
// Pass vector registers via stack if this is a variable arguments
// function. This should only be used by 32-bit calling conventions.
if (signature.hasVarArgs() && cc.hasFlag(CallConv::kFlagPassVecByStackIfVA))
regId = BaseReg::kIdBad;
}

if (regId != BaseReg::kIdBad) {
arg.initTypeId(typeId);
arg.assignRegData(x86VecTypeIdToRegType(typeId), regId);
func.addUsedRegs(Reg::kGroupVec, Support::bitMask(regId));
vecPos++;
}
else {
uint32_t size = Type::sizeOf(typeId);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
else {
uint32_t size = Type::sizeOf(typeId);
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
}
continue;
}
}
break;
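The rewritten loop walks argument packs instead of a flat argument index, but the per-value rule is unchanged: take the next register from the calling convention's passing order, otherwise fall back to a stack slot. A condensed sketch of that rule, with hypothetical simplified types:

#include <cstdint>

// Hypothetical sketch: integer arguments consume GP registers from a
// passing order until the per-group limit is reached, then go to stack.
struct ArgLoc { bool inReg; uint32_t regId; int32_t stackOffset; };

ArgLoc assignIntArg(uint32_t& gpzPos, const uint8_t gpOrder[], uint32_t maxRegArgs,
                    uint32_t& stackOffset, uint32_t size) {
  if (gpzPos < maxRegArgs)
    return ArgLoc{ true, gpOrder[gpzPos++], 0 };

  ArgLoc loc{ false, 0xFFu, int32_t(stackOffset) };
  stackOffset += size;                     // advance to the next stack slot
  return loc;
}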
@@ -226,68 +268,75 @@ ASMJIT_FAVOR_SIZE Error X86Internal::initFuncDetail(FuncDetail& func, const Func
// RCX XMM1 R8 XMM3
//
// Unused vector registers are used by HVA.

bool isVectorCall = (cc.strategy() == CallConv::kStrategyX64VectorCall);

for (i = 0; i < argCount; i++) {
FuncValue& arg = func._args[i];
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
unpackValues(func, func._args[argIndex]);

uint32_t typeId = arg.typeId();
uint32_t size = Type::sizeOf(typeId);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
FuncValue& arg = func._args[argIndex][valueIndex];

if (Type::isInt(typeId) || Type::isMmx(typeId)) {
uint32_t regId = BaseReg::kIdBad;
// Terminate if there are no more arguments in the pack.
if (!arg)
break;

if (i < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupGp].id[i];
uint32_t typeId = arg.typeId();
uint32_t size = Type::sizeOf(typeId);

if (regId != BaseReg::kIdBad) {
uint32_t regType = (size <= 4 && !Type::isMmx(typeId)) ? Reg::kTypeGpd : Reg::kTypeGpq;
arg.assignRegData(regType, regId);
func.addUsedRegs(Reg::kGroupGp, Support::bitMask(regId));
}
else {
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += 8;
}
continue;
}
if (Type::isInt(typeId) || Type::isMmx(typeId)) {
uint32_t regId = BaseReg::kIdBad;

if (Type::isFloat(typeId) || Type::isVec(typeId)) {
uint32_t regId = BaseReg::kIdBad;
if (argIndex < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupGp].id[argIndex];

if (i < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupVec].id[i];

if (regId != BaseReg::kIdBad) {
// X64-ABI doesn't allow vector types (XMM|YMM|ZMM) to be passed
// via registers, however, VectorCall was designed for that purpose.
if (Type::isFloat(typeId) || isVectorCall) {
uint32_t regType = x86VecTypeIdToRegType(typeId);
if (regId != BaseReg::kIdBad) {
uint32_t regType = (size <= 4 && !Type::isMmx(typeId)) ? Reg::kTypeGpd : Reg::kTypeGpq;
arg.assignRegData(regType, regId);
func.addUsedRegs(Reg::kGroupVec, Support::bitMask(regId));
continue;
func.addUsedRegs(Reg::kGroupGp, Support::bitMask(regId));
}
}

// Passed via stack if the argument is float/double or indirect.
// The trap: if the argument is passed indirectly, its address can
// still be passed via register when the argument's index has a GP one.
if (Type::isFloat(typeId)) {
arg.assignStackOffset(int32_t(stackOffset));
}
else {
uint32_t gpRegId = cc._passedOrder[Reg::kGroupGp].id[i];
if (gpRegId != BaseReg::kIdBad)
arg.assignRegData(Reg::kTypeGpq, gpRegId);
else
else {
arg.assignStackOffset(int32_t(stackOffset));
arg.addFlags(FuncValue::kFlagIsIndirect);
stackOffset += 8;
}
continue;
}

// Always 8 bytes (float/double/pointer).
stackOffset += 8;
continue;
if (Type::isFloat(typeId) || Type::isVec(typeId)) {
uint32_t regId = BaseReg::kIdBad;

if (argIndex < CallConv::kMaxRegArgsPerGroup)
regId = cc._passedOrder[Reg::kGroupVec].id[argIndex];

if (regId != BaseReg::kIdBad) {
// X64-ABI doesn't allow vector types (XMM|YMM|ZMM) to be passed
// via registers, however, VectorCall was designed for that purpose.
if (Type::isFloat(typeId) || isVectorCall) {
uint32_t regType = x86VecTypeIdToRegType(typeId);
arg.assignRegData(regType, regId);
func.addUsedRegs(Reg::kGroupVec, Support::bitMask(regId));
continue;
}
}

// Passed via stack if the argument is float/double or indirect.
// The trap: if the argument is passed indirectly, its address can
// still be passed via register when the argument's index has a GP one.
if (Type::isFloat(typeId)) {
arg.assignStackOffset(int32_t(stackOffset));
}
else {
uint32_t gpRegId = cc._passedOrder[Reg::kGroupGp].id[argIndex];
if (gpRegId != BaseReg::kIdBad)
arg.assignRegData(Reg::kTypeGpq, gpRegId);
else
arg.assignStackOffset(int32_t(stackOffset));
arg.addFlags(FuncValue::kFlagIsIndirect);
}

// Always 8 bytes (float/double/pointer).
stackOffset += 8;
continue;
}
}
}
break;
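Win64 keys both register files to the same argument index, which is why this loop indexes `_passedOrder` with `argIndex` rather than a per-group counter. A standalone sketch of that rule (hypothetical helper; shadow space ignored):

#include <cstdint>

// Win64 sketch: argument index N maps to the Nth GP *and* the Nth XMM
// register; arguments past the fourth take 8-byte stack slots.
struct Win64Slot { bool inReg; bool isVec; uint32_t regId; int32_t stackOffset; };

Win64Slot win64ArgSlot(uint32_t argIndex, bool isFloat) {
  static const uint32_t gpId[4]  = { 1, 2, 8, 9 };  // rcx, rdx, r8, r9
  static const uint32_t vecId[4] = { 0, 1, 2, 3 };  // xmm0..xmm3
  if (argIndex < 4)
    return Win64Slot{ true, isFloat, isFloat ? vecId[argIndex] : gpId[argIndex], 0 };
  return Win64Slot{ false, false, 0, int32_t(argIndex * 8) }; // always 8 bytes
}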
@@ -432,7 +481,7 @@ public:
uint8_t _saVarId;
uint32_t _varCount;
WorkData _workData[BaseReg::kGroupVirt];
Var _vars[kFuncArgCountLoHi + 1];
Var _vars[Globals::kMaxFuncArgs + 1];

X86FuncArgsContext() noexcept;
@@ -466,7 +515,6 @@ ASMJIT_FAVOR_SIZE Error X86FuncArgsContext::initWorkData(const FuncFrame& frame,
// The code has to be updated if this changes.
ASMJIT_ASSERT(BaseReg::kGroupVirt == 4);

uint32_t i;
const FuncDetail& func = *args.funcDetail();

// Initialize Architecture.
@@ -486,95 +534,98 @@ ASMJIT_FAVOR_SIZE Error X86FuncArgsContext::initWorkData(const FuncFrame& frame,

// Extract information from all function arguments/assignments and build Var[] array.
uint32_t varId = 0;
for (i = 0; i < kFuncArgCountLoHi; i++) {
const FuncValue& dst_ = args.arg(i);
if (!dst_.isAssigned())
continue;
for (uint32_t argIndex = 0; argIndex < Globals::kMaxFuncArgs; argIndex++) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& dst_ = args.arg(argIndex, valueIndex);
if (!dst_.isAssigned())
continue;

const FuncValue& src_ = func.arg(i);
if (ASMJIT_UNLIKELY(!src_.isAssigned()))
return DebugUtils::errored(kErrorInvalidState);

Var& var = _vars[varId];
var.init(src_, dst_);

FuncValue& src = var.cur;
FuncValue& dst = var.out;

uint32_t dstGroup = 0xFFFFFFFFu;
uint32_t dstId = BaseReg::kIdBad;
WorkData* dstWd = nullptr;

// Not supported.
if (src.isIndirect())
return DebugUtils::errored(kErrorInvalidAssignment);

if (dst.isReg()) {
uint32_t dstType = dst.regType();
if (ASMJIT_UNLIKELY(dstType >= Reg::kTypeCount))
return DebugUtils::errored(kErrorInvalidRegType);

// Copy TypeId from source if the destination doesn't have it. The RA
// used by BaseCompiler would never leave TypeId undefined, but users
// of FuncAPI can just assign phys regs without specifying the type.
if (!dst.hasTypeId())
dst.setTypeId(Reg::typeIdOf(dst.regType()));

dstGroup = Reg::groupOf(dstType);
if (ASMJIT_UNLIKELY(dstGroup >= BaseReg::kGroupVirt))
return DebugUtils::errored(kErrorInvalidRegGroup);

dstWd = &_workData[dstGroup];
dstId = dst.regId();
if (ASMJIT_UNLIKELY(dstId >= 32 || !Support::bitTest(dstWd->archRegs(), dstId)))
return DebugUtils::errored(kErrorInvalidPhysId);

if (ASMJIT_UNLIKELY(Support::bitTest(dstWd->dstRegs(), dstId)))
return DebugUtils::errored(kErrorOverlappedRegs);

dstWd->_dstRegs |= Support::bitMask(dstId);
dstWd->_dstShuf |= Support::bitMask(dstId);
dstWd->_usedRegs |= Support::bitMask(dstId);
}
else {
if (!dst.hasTypeId())
dst.setTypeId(src.typeId());

RegInfo regInfo = x86GetRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!regInfo.isValid()))
const FuncValue& src_ = func.arg(argIndex, valueIndex);
if (ASMJIT_UNLIKELY(!src_.isAssigned()))
return DebugUtils::errored(kErrorInvalidState);
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(regInfo.group()));
}

if (src.isReg()) {
uint32_t srcId = src.regId();
uint32_t srcGroup = Reg::groupOf(src.regType());
Var& var = _vars[varId];
var.init(src_, dst_);

if (dstGroup == srcGroup) {
dstWd->assign(varId, srcId);
FuncValue& src = var.cur;
FuncValue& dst = var.out;

// The best case, register is allocated where it is expected to be.
if (dstId == srcId)
var.markDone();
uint32_t dstGroup = 0xFFFFFFFFu;
uint32_t dstId = BaseReg::kIdBad;
WorkData* dstWd = nullptr;

// Not supported.
if (src.isIndirect())
return DebugUtils::errored(kErrorInvalidAssignment);

if (dst.isReg()) {
uint32_t dstType = dst.regType();
if (ASMJIT_UNLIKELY(dstType >= Reg::kTypeCount))
return DebugUtils::errored(kErrorInvalidRegType);

// Copy TypeId from source if the destination doesn't have it. The RA
// used by BaseCompiler would never leave TypeId undefined, but users
// of FuncAPI can just assign phys regs without specifying the type.
if (!dst.hasTypeId())
dst.setTypeId(Reg::typeIdOf(dst.regType()));

dstGroup = Reg::groupOf(dstType);
if (ASMJIT_UNLIKELY(dstGroup >= BaseReg::kGroupVirt))
return DebugUtils::errored(kErrorInvalidRegGroup);

dstWd = &_workData[dstGroup];
dstId = dst.regId();
if (ASMJIT_UNLIKELY(dstId >= 32 || !Support::bitTest(dstWd->archRegs(), dstId)))
return DebugUtils::errored(kErrorInvalidPhysId);

if (ASMJIT_UNLIKELY(Support::bitTest(dstWd->dstRegs(), dstId)))
return DebugUtils::errored(kErrorOverlappedRegs);

dstWd->_dstRegs |= Support::bitMask(dstId);
dstWd->_dstShuf |= Support::bitMask(dstId);
dstWd->_usedRegs |= Support::bitMask(dstId);
}
else {
if (ASMJIT_UNLIKELY(srcGroup >= BaseReg::kGroupVirt))
if (!dst.hasTypeId())
dst.setTypeId(src.typeId());

RegInfo regInfo = x86GetRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!regInfo.isValid()))
return DebugUtils::errored(kErrorInvalidState);

WorkData& srcData = _workData[srcGroup];
srcData.assign(varId, srcId);
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(regInfo.group()));
}
}
else {
if (dstWd)
dstWd->_numStackArgs++;
_hasStackSrc = true;
}

varId++;
if (src.isReg()) {
uint32_t srcId = src.regId();
uint32_t srcGroup = Reg::groupOf(src.regType());

if (dstGroup == srcGroup) {
dstWd->assign(varId, srcId);

// The best case, register is allocated where it is expected to be.
if (dstId == srcId)
var.markDone();
}
else {
if (ASMJIT_UNLIKELY(srcGroup >= BaseReg::kGroupVirt))
return DebugUtils::errored(kErrorInvalidState);

WorkData& srcData = _workData[srcGroup];
srcData.assign(varId, srcId);
}
}
else {
if (dstWd)
dstWd->_numStackArgs++;
_hasStackSrc = true;
}

varId++;
}
}

// Initialize WorkData::workRegs.
uint32_t i;
for (i = 0; i < BaseReg::kGroupVirt; i++)
_workData[i]._workRegs = (_workData[i].archRegs() & (frame.dirtyRegs(i) | ~frame.preservedRegs(i))) | _workData[i].dstRegs() | _workData[i].assignedRegs();
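The `kErrorOverlappedRegs` check above reduces to a bitmask test over physical register IDs. Stripped to its essence (a sketch, using a plain uint32_t mask in place of `Support` and `WorkData`):

#include <cstdint>

// Each destination register sets one bit; seeing the same bit twice means
// two values were assigned to the same physical register.
bool addDstReg(uint32_t& dstRegs, uint32_t regId) {
  uint32_t mask = 1u << regId;     // Support::bitMask(regId) equivalent
  if (dstRegs & mask)
    return false;                  // would be kErrorOverlappedRegs
  dstRegs |= mask;
  return true;
}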
@@ -404,22 +404,20 @@ Error X86RACFGBuilder::onInst(InstNode* inst, uint32_t& controlType, RAInstBuild
// ============================================================================

Error X86RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
uint32_t argCount = invokeNode->argCount();
uint32_t retCount = invokeNode->retCount();
const FuncDetail& fd = invokeNode->detail();
uint32_t argCount = invokeNode->argCount();

cc()->_setCursor(invokeNode->prev());

uint32_t nativeRegType = cc()->_gpRegInfo.type();

for (uint32_t loIndex = 0; loIndex < argCount; loIndex++) {
for (uint32_t hiIndex = 0; hiIndex <= kFuncArgHi; hiIndex += kFuncArgHi) {
uint32_t argIndex = loIndex + hiIndex;
if (!fd.hasArg(argIndex))
continue;
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
break;

const FuncValue& arg = fd.arg(argIndex);
const Operand& op = invokeNode->arg(argIndex);
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);

if (op.isNone())
continue;
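On the consumer side, a packed argument is supplied one value at a time. A hedged usage sketch for a 64-bit argument on 32-bit x86, assuming this commit gives `InvokeNode` a two-index `setArg` analogous to the compiler's `_setArg(argIndex, valueIndex, reg)` (the exact public signature is not shown in this hunk):

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: pass one uint64_t argument as a lo/hi value pack.
static void emitPackedCall(x86::Compiler& cc, uint64_t target) {
  x86::Gp lo = cc.newUInt32("lo");
  x86::Gp hi = cc.newUInt32("hi");
  cc.mov(lo, 1);
  cc.mov(hi, 0);

  InvokeNode* invokeNode;
  cc.invoke(&invokeNode, target, FuncSignatureT<void, uint64_t>(CallConv::kIdHost));
  invokeNode->setArg(0, 0, lo);    // low 32 bits (assumed signature)
  invokeNode->setArg(0, 1, hi);    // high 32 bits (assumed signature)
}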
@@ -443,7 +441,7 @@ Error X86RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {

BaseReg indirectReg;
moveVecToPtr(invokeNode, arg, reg.as<Vec>(), &indirectReg);
invokeNode->_args[argIndex] = indirectReg;
invokeNode->_args[argIndex][valueIndex] = indirectReg;
}
else {
if (regGroup != argGroup) {
@@ -475,7 +473,7 @@ Error X86RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
if (arg.isReg()) {
BaseReg reg;
ASMJIT_PROPAGATE(moveImmToRegArg(invokeNode, arg, op.as<Imm>(), &reg));
invokeNode->_args[argIndex] = reg;
invokeNode->_args[argIndex][valueIndex] = reg;
}
else {
ASMJIT_PROPAGATE(moveImmToStackArg(invokeNode, arg, op.as<Imm>()));
@@ -488,53 +486,57 @@ Error X86RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {
if (fd.hasFlag(CallConv::kFlagCalleePopsStack))
ASMJIT_PROPAGATE(cc()->sub(cc()->zsp(), fd.argStackSize()));

for (uint32_t retIndex = 0; retIndex < retCount; retIndex++) {
const FuncValue& ret = fd.ret(retIndex);
const Operand& op = invokeNode->ret(retIndex);
if (fd.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& ret = fd.ret(valueIndex);
if (!ret)
break;

if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));
const Operand& op = invokeNode->ret(valueIndex);
if (op.isReg()) {
const Reg& reg = op.as<Reg>();
RAWorkReg* workReg;
ASMJIT_PROPAGATE(_pass->virtIndexAsWorkReg(Operand::virtIdToIndex(reg.id()), &workReg));

if (ret.isReg()) {
if (ret.regType() == Reg::kTypeSt) {
if (workReg->group() != Reg::kGroupVec)
return DebugUtils::errored(kErrorInvalidAssignment);

Reg dst = Reg(workReg->signature(), workReg->virtId());
Mem mem;

uint32_t typeId = Type::baseOf(workReg->typeId());
if (ret.hasTypeId())
typeId = ret.typeId();

switch (typeId) {
case Type::kIdF32:
ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 4, 4));
mem.setSize(4);
ASMJIT_PROPAGATE(cc()->fstp(mem));
ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovss, Inst::kIdVmovss), dst.as<Xmm>(), mem));
break;

case Type::kIdF64:
ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 8, 4));
mem.setSize(8);
ASMJIT_PROPAGATE(cc()->fstp(mem));
ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovsd, Inst::kIdVmovsd), dst.as<Xmm>(), mem));
break;

default:
if (ret.isReg()) {
if (ret.regType() == Reg::kTypeSt) {
if (workReg->group() != Reg::kGroupVec)
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
else {
uint32_t regGroup = workReg->group();
uint32_t retGroup = Reg::groupOf(ret.regType());

if (regGroup != retGroup) {
// TODO: Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
Reg dst = Reg(workReg->signature(), workReg->virtId());
Mem mem;

uint32_t typeId = Type::baseOf(workReg->typeId());
if (ret.hasTypeId())
typeId = ret.typeId();

switch (typeId) {
case Type::kIdF32:
ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 4, 4));
mem.setSize(4);
ASMJIT_PROPAGATE(cc()->fstp(mem));
ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovss, Inst::kIdVmovss), dst.as<Xmm>(), mem));
break;

case Type::kIdF64:
ASMJIT_PROPAGATE(_pass->useTemporaryMem(mem, 8, 4));
mem.setSize(8);
ASMJIT_PROPAGATE(cc()->fstp(mem));
ASMJIT_PROPAGATE(cc()->emit(choose(Inst::kIdMovsd, Inst::kIdVmovsd), dst.as<Xmm>(), mem));
break;

default:
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
else {
uint32_t regGroup = workReg->group();
uint32_t retGroup = Reg::groupOf(ret.regType());

if (regGroup != retGroup) {
// TODO: Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
}
}
}
}
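The F32/F64 cases above move an x87 return value (FP0) into an XMM register through a temporary memory slot, since there is no direct register-to-register path. The raw instruction pattern, as a standalone sketch using a scratch stack slot instead of `useTemporaryMem`:

#include <asmjit/x86.h>
using namespace asmjit;

// Pattern sketch: spill ST0 to memory, reload into XMM0 (double case).
static void stToXmm(x86::Assembler& a) {
  a.sub(x86::esp, 8);                            // scratch slot on the stack
  a.fstp(x86::qword_ptr(x86::esp));              // pop ST0 into the slot
  a.movsd(x86::xmm0, x86::qword_ptr(x86::esp));  // reload as a double
  a.add(x86::esp, 8);
}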
@@ -551,16 +553,16 @@ Error X86RACFGBuilder::onBeforeInvoke(InvokeNode* invokeNode) noexcept {

Error X86RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept {
uint32_t argCount = invokeNode->argCount();
uint32_t retCount = invokeNode->retCount();
const FuncDetail& fd = invokeNode->detail();

for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
for (uint32_t argHi = 0; argHi <= kFuncArgHi; argHi += kFuncArgHi) {
if (!fd.hasArg(argIndex + argHi))
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
if (!argPack[valueIndex])
continue;

const FuncValue& arg = fd.arg(argIndex + argHi);
const Operand& op = invokeNode->arg(argIndex + argHi);
const FuncValue& arg = argPack[valueIndex];
const Operand& op = invokeNode->arg(argIndex, valueIndex);

if (op.isNone())
continue;
@@ -588,11 +590,13 @@ Error X86RACFGBuilder::onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexc
}
}

for (uint32_t retIndex = 0; retIndex < retCount; retIndex++) {
for (uint32_t retIndex = 0; retIndex < Globals::kMaxValuePack; retIndex++) {
const FuncValue& ret = fd.ret(retIndex);
const Operand& op = invokeNode->ret(retIndex);
if (!ret)
break;

// Not handled here...
const Operand& op = invokeNode->ret(retIndex);
if (ret.regType() == Reg::kTypeSt)
continue;
@@ -1223,19 +1227,20 @@ Error X86RAPass::onEmitJump(const Label& label) noexcept {

Error X86RAPass::onEmitPreCall(InvokeNode* invokeNode) noexcept {
if (invokeNode->detail().hasVarArgs() && cc()->is64Bit()) {
uint32_t argCount = invokeNode->argCount();
const FuncDetail& fd = invokeNode->detail();
uint32_t argCount = invokeNode->argCount();

switch (invokeNode->detail().callConv().id()) {
case CallConv::kIdX64SystemV: {
// AL register contains the number of arguments passed in XMM register(s).
uint32_t n = 0;
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
for (uint32_t argHi = 0; argHi <= kFuncArgHi; argHi += kFuncArgHi) {
if (!fd.hasArg(argIndex + argHi))
continue;
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& arg = argPack[valueIndex];
if (!arg)
break;

const FuncValue& arg = fd.arg(argIndex + argHi);
if (arg.isReg() && Reg::groupOf(arg.regType()) == Reg::kGroupVec)
n++;
}
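For SysV varargs, the count computed above ends up in AL before the call; e.g. calling printf("%f", x) with one double in XMM0 requires AL == 1. The emitted prologue is just a move into EAX (sketch):

#include <asmjit/x86.h>
using namespace asmjit;

// Sketch: announce the number of vector-register arguments before a
// SystemV vararg call; writing EAX also sets AL, which the callee reads.
static void setVecArgCount(x86::Assembler& a, uint32_t n) {
  a.mov(x86::eax, n);
}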
@@ -1251,11 +1256,12 @@ Error X86RAPass::onEmitPreCall(InvokeNode* invokeNode) noexcept {
case CallConv::kIdX64Windows: {
// Each double-precision argument passed in XMM must also be passed in GP.
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
for (uint32_t argHi = 0; argHi <= kFuncArgHi; argHi += kFuncArgHi) {
if (!fd.hasArg(argIndex + argHi))
continue;
const FuncValuePack& argPack = fd.argPack(argIndex);
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
const FuncValue& arg = argPack[valueIndex];
if (!arg)
break;

const FuncValue& arg = fd.arg(argIndex + argHi);
if (arg.isReg() && Reg::groupOf(arg.regType()) == Reg::kGroupVec) {
Gp dst = gpq(fd.callConv().passedOrder(Reg::kGroupGp)[argIndex]);
Xmm src = xmm(arg.regId());
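The Win64 branch mirrors each vararg double from its XMM register into the paired GP register, which is what the `dst`/`src` pair above feeds. The pattern for an argument at index 1 (sketch):

#include <asmjit/x86.h>
using namespace asmjit;

// Sketch: a vararg double in XMM1 must also be present in RDX on Win64;
// movq performs a bitwise copy of the double's 64 bits.
static void mirrorVarArgDouble(x86::Assembler& a) {
  a.movq(x86::rdx, x86::xmm1);
}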
@@ -3676,6 +3676,68 @@ public:
}
};

// ============================================================================
// [X86Test_FuncCallInt64Arg]
// ============================================================================

class X86Test_FuncCallInt64Arg : public X86Test {
public:
X86Test_FuncCallInt64Arg() : X86Test("FuncCallInt64Arg") {}

static void add(X86TestApp& app) {
app.add(new X86Test_FuncCallInt64Arg());
}

virtual void compile(x86::Compiler& cc) {
cc.addFunc(FuncSignatureT<uint64_t, uint64_t>(CallConv::kIdHost));

if (cc.is64Bit()) {
x86::Gp reg = cc.newUInt64();
cc.setArg(0, reg);
cc.add(reg, 1);
cc.ret(reg);
}
else {
x86::Gp hi = cc.newUInt32("hi");
x86::Gp lo = cc.newUInt32("lo");

cc.setArg(0, 0, lo);
cc.setArg(0, 1, hi);

cc.add(lo, 1);
cc.adc(hi, 0);
cc.ret(lo, hi);
}

cc.endFunc();
}

virtual bool run(void* _func, String& result, String& expect) {
typedef uint64_t (*Func)(uint64_t);
Func func = ptr_as_func<Func>(_func);

uint64_t resultRet = func(uint64_t(0xFFFFFFFF));
uint64_t expectRet = 0x100000000;

result.assignFormat("ret=%llu", (unsigned long long)resultRet);
expect.assignFormat("ret=%llu", (unsigned long long)expectRet);

return resultRet == expectRet;
}

static double calledFunc(size_t n, ...) {
double sum = 0;
va_list ap;
va_start(ap, n);
for (size_t i = 0; i < n; i++) {
double arg = va_arg(ap, double);
sum += arg;
}
va_end(ap);
return sum;
}
};

// ============================================================================
// [X86Test_FuncCallMisc1]
// ============================================================================
@@ -4315,6 +4377,7 @@ int main(int argc, char* argv[]) {
app.addT<X86Test_FuncCallRecursive>();
app.addT<X86Test_FuncCallVarArg1>();
app.addT<X86Test_FuncCallVarArg2>();
app.addT<X86Test_FuncCallInt64Arg>();
app.addT<X86Test_FuncCallMisc1>();
app.addT<X86Test_FuncCallMisc2>();
app.addT<X86Test_FuncCallMisc3>();