- Initial support for StaticRuntime (the base address is known before the code is generated).

- Bind moved from X86Assembler to Assembler.
- Added helpers to get offset of labels (useful after the code is generated).
- Added support to cache and reuse Assembler if only Compiler is used.
- CodeGen getFeature(), setFeature() and friends are now inlined.
- Added offset to TargetNode, which is filled after the code is serialized.
This commit is contained in:
kobalicek
2014-09-08 20:24:31 +02:00
parent d7fc62d9e9
commit 9ead0cfb4c
25 changed files with 980 additions and 604 deletions

View File

@@ -84,8 +84,6 @@ struct X86Test_AlignBase : public X86Test {
// Alloc, use and spill preserved registers. // Alloc, use and spill preserved registers.
if (_varCount) { if (_varCount) {
uint32_t gpCount = c.getRegCount().getGp(); uint32_t gpCount = c.getRegCount().getGp();
c.comment("Var");
uint32_t varIndex = 0; uint32_t varIndex = 0;
uint32_t regIndex = 0; uint32_t regIndex = 0;
uint32_t regMask = 0x1; uint32_t regMask = 0x1;
@@ -107,12 +105,8 @@ struct X86Test_AlignBase : public X86Test {
// Do a sum of arguments to verify possible relocation when misaligned. // Do a sum of arguments to verify possible relocation when misaligned.
if (_argCount) { if (_argCount) {
uint32_t argIndex;
c.comment("Arg");
c.xor_(gpSum, gpSum); c.xor_(gpSum, gpSum);
for (uint32_t argIndex = 0; argIndex < _argCount; argIndex++) {
for (argIndex = 0; argIndex < _argCount; argIndex++) {
X86GpVar gpArg(c, kVarTypeInt32); X86GpVar gpArg(c, kVarTypeInt32);
c.setArg(argIndex, gpArg); c.setArg(argIndex, gpArg);
@@ -121,7 +115,6 @@ struct X86Test_AlignBase : public X86Test {
} }
// Check alignment of xmmVar (has to be 16). // Check alignment of xmmVar (has to be 16).
c.comment("Ret");
c.lea(gpVar, xmmVar.m()); c.lea(gpVar, xmmVar.m());
c.shl(gpVar.r32(), 28); c.shl(gpVar.r32(), 28);
@@ -1660,7 +1653,7 @@ struct X86Test_AllocBlend : public X86Test {
// Has to be aligned. // Has to be aligned.
uint32_t* dstBuffer = (uint32_t*)IntUtil::alignTo<intptr_t>((intptr_t)_dstBuffer, 16); uint32_t* dstBuffer = (uint32_t*)IntUtil::alignTo<intptr_t>((intptr_t)_dstBuffer, 16);
uint32_t* srcBuffer = (uint32_t*)IntUtil::alignTo<intptr_t>((intptr_t)_srcBuffer, 16); uint32_t* srcBuffer = (uint32_t*)IntUtil::alignTo<intptr_t>((intptr_t)_srcBuffer, 16);
::memcpy(dstBuffer, dstConstData, sizeof(dstConstData)); ::memcpy(dstBuffer, dstConstData, sizeof(dstConstData));
::memcpy(srcBuffer, srcConstData, sizeof(srcConstData)); ::memcpy(srcBuffer, srcConstData, sizeof(srcConstData));

View File

@@ -43,8 +43,10 @@ Assembler::~Assembler() {
void Assembler::reset(bool releaseMemory) { void Assembler::reset(bool releaseMemory) {
// CodeGen members. // CodeGen members.
_baseAddress = kNoBaseAddress;
_instOptions = 0;
_error = kErrorOk; _error = kErrorOk;
_options = 0;
_baseZone.reset(releaseMemory); _baseZone.reset(releaseMemory);
// Assembler members. // Assembler members.
@@ -60,8 +62,8 @@ void Assembler::reset(bool releaseMemory) {
_comment = NULL; _comment = NULL;
_unusedLinks = NULL; _unusedLinks = NULL;
_labels.reset(releaseMemory); _labelList.reset(releaseMemory);
_relocData.reset(releaseMemory); _relocList.reset(releaseMemory);
} }
// ============================================================================ // ============================================================================
@@ -131,11 +133,11 @@ Error Assembler::_reserve(size_t n) {
// ============================================================================ // ============================================================================
Error Assembler::_registerIndexedLabels(size_t index) { Error Assembler::_registerIndexedLabels(size_t index) {
size_t i = _labels.getLength(); size_t i = _labelList.getLength();
if (index < i) if (index < i)
return kErrorOk; return kErrorOk;
if (_labels._grow(index - i) != kErrorOk) if (_labelList._grow(index - i) != kErrorOk)
return setError(kErrorNoHeapMemory); return setError(kErrorNoHeapMemory);
LabelData data; LabelData data;
@@ -143,7 +145,7 @@ Error Assembler::_registerIndexedLabels(size_t index) {
data.links = NULL; data.links = NULL;
do { do {
_labels.append(data); _labelList.append(data);
} while (++i < index); } while (++i < index);
return kErrorOk; return kErrorOk;
@@ -152,13 +154,13 @@ Error Assembler::_registerIndexedLabels(size_t index) {
Error Assembler::_newLabel(Label* dst) { Error Assembler::_newLabel(Label* dst) {
dst->_label.op = kOperandTypeLabel; dst->_label.op = kOperandTypeLabel;
dst->_label.size = 0; dst->_label.size = 0;
dst->_label.id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labels.getLength())); dst->_label.id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labelList.getLength()));
LabelData data; LabelData data;
data.offset = -1; data.offset = -1;
data.links = NULL; data.links = NULL;
if (_labels.append(data) != kErrorOk) if (_labelList.append(data) != kErrorOk)
goto _NoMemory; goto _NoMemory;
return kErrorOk; return kErrorOk;
@@ -187,6 +189,80 @@ LabelLink* Assembler::_newLabelLink() {
return link; return link;
} }
// Bind `label` to the current offset of the code buffer.
//
// Walks the chain of `LabelLink` records accumulated by forward references to
// this label, patching each recorded displacement (or updating the associated
// `RelocData` entry when `relocId` is set), then marks the label as bound.
Error Assembler::bind(const Label& label) {
// Get label data based on label id.
uint32_t index = label.getId();
LabelData* data = getLabelData(index);
// Label can be bound only once.
if (data->offset != -1)
return setError(kErrorLabelAlreadyBound);
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logFormat(kLoggerStyleLabel, "L%u:\n", index);
#endif // !ASMJIT_DISABLE_LOGGER
// Keep the first error encountered, but continue walking the remaining
// links so the label still ends up bound.
Error error = kErrorOk;
size_t pos = getOffset();
LabelLink* link = data->links;
LabelLink* prev = NULL;
while (link) {
intptr_t offset = link->offset;
if (link->relocId != -1) {
// Handle RelocData - We have to update RelocData information instead of
// patching the displacement in LabelData.
_relocList[link->relocId].data += static_cast<Ptr>(pos);
}
else {
// Not using relocId, this means that we are overwriting a real
// displacement in the binary stream.
int32_t patchedValue = static_cast<int32_t>(
static_cast<intptr_t>(pos) - offset + link->displacement);
// Size of the value we are going to patch. Only BYTE/DWORD is allowed.
uint32_t size = getByteAt(offset);
ASMJIT_ASSERT(size == 1 || size == 4);
if (size == 4) {
setInt32At(offset, patchedValue);
}
else {
ASMJIT_ASSERT(size == 1);
if (IntUtil::isInt8(patchedValue))
setByteAt(offset, static_cast<uint8_t>(patchedValue & 0xFF));
else
// The instruction was emitted with an 8-bit displacement that cannot
// hold the patched value; record the error, binding still completes.
error = kErrorIllegalDisplacement;
}
}
prev = link->prev;
link = prev;
}
// Chain unused links.
// NOTE(review): `prev` is always NULL here - the loop above exits only after
// assigning `prev = link->prev == NULL` - so `prev` is re-pointed at the head
// link and only the head is spliced into `_unusedLinks`; any tail links are
// dropped from the recycle pool (they stay zone-allocated, so nothing leaks,
// they are just not reused). Confirm whether the whole chain was intended to
// be recycled here.
link = data->links;
if (link) {
if (prev == NULL)
prev = link;
prev->prev = _unusedLinks;
_unusedLinks = link;
}
// Set as bound (offset is zero or greater and no links).
data->offset = pos;
data->links = NULL;
if (error != kErrorOk)
return setError(error);
return error;
}
// ============================================================================ // ============================================================================
// [asmjit::Assembler - Embed] // [asmjit::Assembler - Embed]
// ============================================================================ // ============================================================================
@@ -210,6 +286,19 @@ Error Assembler::embed(const void* data, uint32_t size) {
return kErrorOk; return kErrorOk;
} }
// ============================================================================
// [asmjit::Assembler - Reloc]
// ============================================================================
// Relocate the generated code into `dst`, optionally overriding the base
// address used for relocation.
//
// When `baseAddress` is `kNoBaseAddress`, the code-generator's configured
// base address is used if one is set, otherwise `dst` itself (the JIT case).
// NOTE(review): an explicit `baseAddress` that differs from the configured
// one returns 0 (failure); this also rejects an explicit base when no base
// was configured at all - confirm that is intended.
size_t Assembler::relocCode(void* dst, Ptr baseAddress) const {
if (baseAddress == kNoBaseAddress)
baseAddress = hasBaseAddress() ? getBaseAddress() : static_cast<Ptr>((uintptr_t)dst);
else if (getBaseAddress() != baseAddress)
return 0;
return _relocCode(dst, baseAddress);
}
// ============================================================================ // ============================================================================
// [asmjit::Assembler - Make] // [asmjit::Assembler - Make]
// ============================================================================ // ============================================================================
@@ -232,52 +321,52 @@ void* Assembler::make() {
// [asmjit::Assembler - Emit (Helpers)] // [asmjit::Assembler - Emit (Helpers)]
// ============================================================================ // ============================================================================
#define no noOperand #define NA noOperand
Error Assembler::emit(uint32_t code) { Error Assembler::emit(uint32_t code) {
return _emit(code, no, no, no, no); return _emit(code, NA, NA, NA, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0) { Error Assembler::emit(uint32_t code, const Operand& o0) {
return _emit(code, o0, no, no, no); return _emit(code, o0, NA, NA, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1) { Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1) {
return _emit(code, o0, o1, no, no); return _emit(code, o0, o1, NA, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) {
return _emit(code, o0, o1, o2, no); return _emit(code, o0, o1, o2, NA);
} }
Error Assembler::emit(uint32_t code, int o0) { Error Assembler::emit(uint32_t code, int o0) {
Imm imm(o0); Imm imm(o0);
return _emit(code, imm, no, no, no); return _emit(code, imm, NA, NA, NA);
} }
Error Assembler::emit(uint32_t code, uint64_t o0) { Error Assembler::emit(uint32_t code, uint64_t o0) {
Imm imm(o0); Imm imm(o0);
return _emit(code, imm, no, no, no); return _emit(code, imm, NA, NA, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0, int o1) { Error Assembler::emit(uint32_t code, const Operand& o0, int o1) {
Imm imm(o1); Imm imm(o1);
return _emit(code, o0, imm, no, no); return _emit(code, o0, imm, NA, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0, uint64_t o1) { Error Assembler::emit(uint32_t code, const Operand& o0, uint64_t o1) {
Imm imm(o1); Imm imm(o1);
return _emit(code, o0, imm, no, no); return _emit(code, o0, imm, NA, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2) { Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2) {
Imm imm(o2); Imm imm(o2);
return _emit(code, o0, o1, imm, no); return _emit(code, o0, o1, imm, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2) { Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2) {
Imm imm(o2); Imm imm(o2);
return _emit(code, o0, o1, imm, no); return _emit(code, o0, o1, imm, NA);
} }
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3) { Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3) {
@@ -290,7 +379,7 @@ Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const
return _emit(code, o0, o1, o2, imm); return _emit(code, o0, o1, o2, imm);
} }
#undef no #undef NA
} // asmjit namespace } // asmjit namespace

View File

@@ -321,31 +321,58 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
// [Label] // [Label]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Get count of labels created. //! Get number of labels created.
ASMJIT_INLINE size_t getLabelsCount() const { ASMJIT_INLINE size_t getLabelsCount() const {
return _labels.getLength(); return _labelList.getLength();
} }
//! Get whether `label` is created. //! Get whether the `label` is valid (created by the assembler).
ASMJIT_INLINE bool isLabelCreated(const Label& label) const { ASMJIT_INLINE bool isLabelValid(const Label& label) const {
return static_cast<size_t>(label.getId()) < _labels.getLength(); return isLabelValid(label.getId());
} }
//! \internal //! \overload
ASMJIT_INLINE bool isLabelValid(uint32_t id) const {
// True when `id` indexes an existing entry in `_labelList`.
return static_cast<size_t>(id) < _labelList.getLength();
}
//! Get whether the `label` is bound.
//! //!
//! \note It's an error to pass label that is not valid. Check the validity
//! of the label by using `isLabelValid()` method before the bound check if
//! you are not sure about its validity, otherwise you may hit an assertion
//! failure in debug mode, and undefined behavior in release mode.
ASMJIT_INLINE bool isLabelBound(const Label& label) const {
return isLabelBound(label.getId());
}
//! \overload
ASMJIT_INLINE bool isLabelBound(uint32_t id) const {
ASMJIT_ASSERT(isLabelValid(id));
// An unbound label keeps the sentinel offset -1 until `bind()` assigns it.
return _labelList[id].offset != -1;
}
//! Get `label` offset or -1 if the label is not yet bound.
ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const {
return getLabelOffset(label.getId());
}
//! \overload
ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const {
ASMJIT_ASSERT(isLabelValid(id));
// -1 means "not bound yet"; otherwise the offset set by `bind()`.
return _labelList[id].offset;
}
//! Get `LabelData` by `label`. //! Get `LabelData` by `label`.
ASMJIT_INLINE LabelData* getLabelData(const Label& label) const { ASMJIT_INLINE LabelData* getLabelData(const Label& label) const {
return getLabelDataById(label.getId()); return getLabelData(label.getId());
} }
//! \internal //! \overload
//! ASMJIT_INLINE LabelData* getLabelData(uint32_t id) const {
//! Get `LabelData` by `id`. ASMJIT_ASSERT(isLabelValid(id));
ASMJIT_INLINE LabelData* getLabelDataById(uint32_t id) const { return const_cast<LabelData*>(&_labelList[id]);
ASMJIT_ASSERT(id != kInvalidValue);
ASMJIT_ASSERT(id < _labels.getLength());
return const_cast<LabelData*>(&_labels[id]);
} }
//! \internal //! \internal
@@ -370,22 +397,17 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
return result; return result;
} }
//! Bind label to the current offset.
virtual void _bind(const Label& label) = 0;
//! Bind label to the current offset. //! Bind label to the current offset.
//! //!
//! \note Label can be bound only once! //! \note Label can be bound only once!
ASMJIT_INLINE void bind(const Label& label) { ASMJIT_API virtual Error bind(const Label& label);
_bind(label);
}
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Embed] // [Embed]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Embed data into the code buffer. //! Embed data into the code buffer.
ASMJIT_API Error embed(const void* data, uint32_t size); ASMJIT_API virtual Error embed(const void* data, uint32_t size);
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Align] // [Align]
@@ -396,57 +418,41 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
//! Typical usage of this is to align labels at start of the inner loops. //! Typical usage of this is to align labels at start of the inner loops.
//! //!
//! Inserts `nop()` instructions or CPU optimized NOPs. //! Inserts `nop()` instructions or CPU optimized NOPs.
ASMJIT_INLINE Error align(uint32_t mode, uint32_t offset) { virtual Error align(uint32_t mode, uint32_t offset) = 0;
return _align(mode, offset);
}
//! \internal
//!
//! Align target buffer to `m` bytes.
virtual Error _align(uint32_t mode, uint32_t offset) = 0;
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Reloc] // [Reloc]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Simplifed version of `relocCode()` method designed for JIT. //! Relocate the code to `baseAddress` and copy to `dst`.
//! //!
//! \overload //! \param dst Contains the location where the relocated code should be
ASMJIT_INLINE size_t relocCode(void* dst) const { //! copied. The pointer can be address returned by virtual memory allocator
return _relocCode(dst, static_cast<Ptr>((uintptr_t)dst)); //! or any other address that has sufficient space.
}
//! Relocate code to a given address `dst`.
//! //!
//! \param dst Refers the location where the relocated code should be copied. //! \param base Base address used for relocation. The `JitRuntime` always
//! The pointer can be address returned by virtual memory allocator or any //! sets the `base` address to be the same as `dst`, but other runtimes, for
//! custom address. //! example `StaticRuntime`, do not have to follow this rule.
//! //!
//! \param base Base address used for relocation. `JitRuntime` always sets //! \retval The number bytes actually used. If the code generator reserved
//! `base` address to be the same as `dst`, but other runtimes do not have //! space for possible trampolines, but didn't use it, the number of bytes
//! to follow this rule. //! used can actually be less than the expected worst case. Virtual memory
//! allocator can shrink the memory allocated first time.
//! //!
//! \retval The number bytes used. If the code generator reserved space for //! A given buffer will be overwritten, to get the number of bytes required,
//! possible trampolines, but these weren't generated, the number of bytes //! use `getCodeSize()`.
//! used can be actually less than the expected worst case. Virtual memory ASMJIT_API size_t relocCode(void* dst, Ptr baseAddress = kNoBaseAddress) const;
//! allocator can in such case return some memory back to the pool.
//!
//! A given buffer will be overwritten, to get number of bytes required use
//! `getCodeSize()`.
ASMJIT_INLINE size_t relocCode(void* dst, Ptr base) const {
return _relocCode(dst, base);
}
//! \internal //! \internal
//! //!
//! Reloc code. //! Reloc code.
virtual size_t _relocCode(void* dst, Ptr base) const = 0; virtual size_t _relocCode(void* dst, Ptr baseAddress) const = 0;
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Make] // [Make]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
ASMJIT_API void* make(); ASMJIT_API virtual void* make();
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Emit] // [Emit]
@@ -482,8 +488,8 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
//! \overload //! \overload
ASMJIT_API Error emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3); ASMJIT_API Error emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3);
//! Emit an instruction (virtual). //! Emit an instruction (virtual).
virtual Error _emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) = 0; virtual Error _emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) = 0;
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Members] // [Members]
@@ -504,16 +510,15 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
//! Size of possible trampolines. //! Size of possible trampolines.
uint32_t _trampolineSize; uint32_t _trampolineSize;
//! Inline comment that will be logged by the next instruction and //! Inline comment that will be logged by the next instruction and set to NULL.
//! set to NULL.
const char* _comment; const char* _comment;
//! Unused `LabelLink` structures pool. //! Unused `LabelLink` structures pool.
LabelLink* _unusedLinks; LabelLink* _unusedLinks;
//! Labels data. //! LabelData list.
PodVector<LabelData> _labels; PodVector<LabelData> _labelList;
//! Relocations data. //! RelocData list.
PodVector<RelocData> _relocData; PodVector<RelocData> _relocList;
}; };
//! \} //! \}

View File

@@ -24,11 +24,13 @@ CodeGen::CodeGen(Runtime* runtime) :
_runtime(runtime), _runtime(runtime),
_logger(NULL), _logger(NULL),
_errorHandler(NULL), _errorHandler(NULL),
_baseAddress(runtime->getBaseAddress()),
_arch(kArchNone), _arch(kArchNone),
_regSize(0), _regSize(0),
_features(static_cast<uint8_t>(IntUtil::mask(kCodeGenOptimizedAlign))), _reserved(0),
_features(IntUtil::mask(kCodeGenOptimizedAlign)),
_instOptions(0),
_error(kErrorOk), _error(kErrorOk),
_options(0),
_baseZone(16384 - kZoneOverhead) {} _baseZone(16384 - kZoneOverhead) {}
CodeGen::~CodeGen() { CodeGen::~CodeGen() {
@@ -103,28 +105,6 @@ Error CodeGen::setErrorHandler(ErrorHandler* handler) {
return kErrorOk; return kErrorOk;
} }
// ============================================================================
// [asmjit::CodeGen - Features]
// ============================================================================
//! Get whether the code-generator `feature` is enabled.
//!
//! Feature IDs that do not fit into the `_features` bit-field are reported
//! as disabled instead of being treated as an error.
bool CodeGen::hasFeature(uint32_t feature) const {
  if (feature >= sizeof(_features) * 8)
    return false;
  return ((static_cast<uint32_t>(_features) >> feature) & 0x1u) != 0;
}
//! Enable or disable the code-generator `feature`.
//!
//! Returns `kErrorInvalidArgument` (via `setError()`) for feature IDs that
//! do not fit into the `_features` bit-field.
//!
//! BUG FIX: the previous implementation computed the bit-mask as
//! `value << feature`, which is zero whenever `value` is false; the
//! subsequent `(_features & ~0) | 0` was then a no-op, so a feature could
//! never be disabled. The mask must be `1 << feature` regardless of `value`.
Error CodeGen::setFeature(uint32_t feature, bool value) {
  if (feature >= sizeof(_features) * 8)
    return setError(kErrorInvalidArgument);

  const uint32_t mask = 1u << feature;
  if (value)
    _features = static_cast<uint8_t>(static_cast<uint32_t>(_features) | mask);
  else
    _features = static_cast<uint8_t>(static_cast<uint32_t>(_features) & ~mask);
  return kErrorOk;
}
} // asmjit namespace } // asmjit namespace
// [Api-End] // [Api-End]

View File

@@ -31,7 +31,7 @@ ASMJIT_ENUM(kCodeGen) {
//! Emit optimized code-alignment sequences (`Assembler` and `Compiler`). //! Emit optimized code-alignment sequences (`Assembler` and `Compiler`).
//! //!
//! Default `true`. //! Default `true`.
//! //!
//! X86/X64 //! X86/X64
//! ------- //! -------
//! //!
@@ -65,7 +65,7 @@ ASMJIT_ENUM(kCodeGen) {
//! Schedule instructions so they can be executed faster (`Compiler` only). //! Schedule instructions so they can be executed faster (`Compiler` only).
//! //!
//! Default `false`, has to be explicitly enabled because it scheduler needs //! Default `false` - has to be explicitly enabled as the scheduler needs
//! some time to run. //! some time to run.
//! //!
//! X86/X64 //! X86/X64
@@ -97,9 +97,13 @@ ASMJIT_ENUM(kAlignMode) {
//! Relocation mode. //! Relocation mode.
ASMJIT_ENUM(kRelocMode) { ASMJIT_ENUM(kRelocMode) {
//! Relocate an absolute address to an absolute address.
kRelocAbsToAbs = 0, kRelocAbsToAbs = 0,
//! Relocate a relative address to an absolute address.
kRelocRelToAbs = 1, kRelocRelToAbs = 1,
//! Relocate an absolute address to a relative address.
kRelocAbsToRel = 2, kRelocAbsToRel = 2,
//! Relocate an absolute address to a relative address or use trampoline.
kRelocTrampoline = 3 kRelocTrampoline = 3
}; };
@@ -163,7 +167,33 @@ struct ASMJIT_VCLASS CodeGen {
} }
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Error] // [BaseAddress]
// --------------------------------------------------------------------------
//! Get whether the code-generator has a base address.
//!
//! \sa \ref getBaseAddress()
ASMJIT_INLINE bool hasBaseAddress() const {
return _baseAddress != kNoBaseAddress;
}
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const {
return _baseAddress;
}
//! Set the base address to `baseAddress`.
ASMJIT_INLINE void setBaseAddress(Ptr baseAddress) {
_baseAddress = baseAddress;
}
//! Reset the base address.
ASMJIT_INLINE void resetBaseAddress() {
setBaseAddress(kNoBaseAddress);
}
// --------------------------------------------------------------------------
// [LastError / ErrorHandler]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Get last error code. //! Get last error code.
@@ -193,35 +223,55 @@ struct ASMJIT_VCLASS CodeGen {
} }
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Features] // [Code-Generation Features]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Get code-generator `feature`. //! Get code-generator `feature`.
ASMJIT_API bool hasFeature(uint32_t feature) const; ASMJIT_INLINE bool hasFeature(uint32_t feature) const {
ASMJIT_ASSERT(feature < 32);
return (_features & (1 << feature)) != 0;
}
//! Set code-generator `feature` to `value`. //! Set code-generator `feature` to `value`.
ASMJIT_API Error setFeature(uint32_t feature, bool value); ASMJIT_INLINE void setFeature(uint32_t feature, bool value) {
ASMJIT_ASSERT(feature < 32);
feature = static_cast<uint32_t>(value) << feature;
_features = (_features & ~feature) | feature;
}
//! Get code-generator features.
ASMJIT_INLINE uint32_t getFeatures() const {
return _features;
}
//! Set code-generator features.
ASMJIT_INLINE void setFeatures(uint32_t features) {
_features = features;
}
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Options] // [Instruction Options]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Get options of the next instruction. //! Get options of the next instruction.
ASMJIT_INLINE uint32_t getOptions() const { ASMJIT_INLINE uint32_t getInstOptions() const {
return _options; return _instOptions;
}
//! Set options of the next instruction.
ASMJIT_INLINE void setOptions(uint32_t options) {
_options = options;
} }
//! Get options of the next instruction and reset them. //! Get options of the next instruction and reset them.
ASMJIT_INLINE uint32_t getOptionsAndReset() { ASMJIT_INLINE uint32_t getInstOptionsAndReset() {
uint32_t options = _options; uint32_t instOptions = _instOptions;
_options = 0; _instOptions = 0;
return options; return instOptions;
}; };
//! Set options of the next instruction.
ASMJIT_INLINE void setInstOptions(uint32_t instOptions) {
_instOptions = instOptions;
}
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Make] // [Make]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
@@ -238,31 +288,39 @@ struct ASMJIT_VCLASS CodeGen {
// [Members] // [Members]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Runtime. //! Target runtime.
Runtime* _runtime; Runtime* _runtime;
#if !defined(ASMJIT_DISABLE_LOGGER) #if !defined(ASMJIT_DISABLE_LOGGER)
//! Logger. //! Logger.
Logger* _logger; Logger* _logger;
#else #else
// Makes the libraries built with/without logging support binary compatible. //! \internal
//!
//! Makes libraries built with or without logging support binary compatible.
void* _logger; void* _logger;
#endif // ASMJIT_DISABLE_LOGGER #endif // ASMJIT_DISABLE_LOGGER
//! Error handler, called by \ref setError(). //! Error handler, called by \ref setError().
ErrorHandler* _errorHandler; ErrorHandler* _errorHandler;
//! Target architecture. //! Base address (-1 if unknown/not used).
uint8_t _arch; Ptr _baseAddress;
//! Target general-purpose register size (4 or 8 bytes).
uint8_t _regSize;
//! Target features.
uint8_t _features;
//! Last error code.
uint8_t _error;
//! Options for the next generated instruction (only 8-bits used). //! Target architecture ID.
uint32_t _options; uint8_t _arch;
//! Target architecture GP register size in bytes (4 or 8).
uint8_t _regSize;
//! \internal
uint16_t _reserved;
//! Code-Generation features, used by \ref hasFeature() and \ref setFeature().
uint32_t _features;
//! Options affecting the next instruction.
uint32_t _instOptions;
//! Last error code.
uint32_t _error;
//! Base zone. //! Base zone.
Zone _baseZone; Zone _baseZone;

View File

@@ -44,6 +44,7 @@ Compiler::Compiler(Runtime* runtime) :
_nodeFlags(0), _nodeFlags(0),
_maxLookAhead(kBaseCompilerDefaultLookAhead), _maxLookAhead(kBaseCompilerDefaultLookAhead),
_targetVarMapping(NULL), _targetVarMapping(NULL),
_assembler(NULL),
_firstNode(NULL), _firstNode(NULL),
_lastNode(NULL), _lastNode(NULL),
_cursor(NULL), _cursor(NULL),
@@ -55,7 +56,10 @@ Compiler::Compiler(Runtime* runtime) :
_globalConstPool(&_baseZone) {} _globalConstPool(&_baseZone) {}
Compiler::~Compiler() { Compiler::~Compiler() {
reset(); reset(true);
if (_assembler != NULL)
delete _assembler;
} }
// ============================================================================ // ============================================================================
@@ -64,14 +68,19 @@ Compiler::~Compiler() {
void Compiler::reset(bool releaseMemory) { void Compiler::reset(bool releaseMemory) {
// CodeGen members. // CodeGen members.
_baseAddress = kNoBaseAddress;
_instOptions = 0;
_error = kErrorOk; _error = kErrorOk;
_options = 0;
_baseZone.reset(releaseMemory); _baseZone.reset(releaseMemory);
// Compiler members. // Compiler members.
_nodeFlowId = 0; _nodeFlowId = 0;
_nodeFlags = 0; _nodeFlags = 0;
if (_assembler != NULL)
_assembler->reset(releaseMemory);
_firstNode = NULL; _firstNode = NULL;
_lastNode = NULL; _lastNode = NULL;
@@ -88,8 +97,8 @@ void Compiler::reset(bool releaseMemory) {
_stringZone.reset(releaseMemory); _stringZone.reset(releaseMemory);
_localConstZone.reset(releaseMemory); _localConstZone.reset(releaseMemory);
_targets.reset(releaseMemory); _targetList.reset(releaseMemory);
_vars.reset(releaseMemory); _varList.reset(releaseMemory);
} }
// ============================================================================ // ============================================================================
@@ -293,9 +302,9 @@ AlignNode* Compiler::addAlign(uint32_t mode, uint32_t offset) {
TargetNode* Compiler::newTarget() { TargetNode* Compiler::newTarget() {
TargetNode* node = newNode<TargetNode>( TargetNode* node = newNode<TargetNode>(
OperandUtil::makeLabelId(static_cast<uint32_t>(_targets.getLength()))); OperandUtil::makeLabelId(static_cast<uint32_t>(_targetList.getLength())));
if (node == NULL || _targets.append(node) != kErrorOk) if (node == NULL || _targetList.append(node) != kErrorOk)
goto _NoMemory; goto _NoMemory;
return node; return node;
@@ -330,11 +339,12 @@ _NoMemory:
return setError(kErrorNoHeapMemory); return setError(kErrorNoHeapMemory);
} }
void Compiler::bind(const Label& label) { Error Compiler::bind(const Label& label) {
uint32_t index = label.getId(); uint32_t index = label.getId();
ASMJIT_ASSERT(index < _targets.getLength()); ASMJIT_ASSERT(index < _targetList.getLength());
addNode(_targets[index]); addNode(_targetList[index]);
return kErrorOk;
} }
// ============================================================================ // ============================================================================
@@ -460,7 +470,7 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char*
goto _NoMemory; goto _NoMemory;
vd->_name = noName; vd->_name = noName;
vd->_id = OperandUtil::makeVarId(static_cast<uint32_t>(_vars.getLength())); vd->_id = OperandUtil::makeVarId(static_cast<uint32_t>(_varList.getLength()));
vd->_contextId = kInvalidValue; vd->_contextId = kInvalidValue;
if (name != NULL && name[0] != '\0') { if (name != NULL && name[0] != '\0') {
@@ -495,7 +505,7 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char*
vd->_va = NULL; vd->_va = NULL;
if (_vars.append(vd) != kErrorOk) if (_varList.append(vd) != kErrorOk)
goto _NoMemory; goto _NoMemory;
return vd; return vd;
@@ -575,6 +585,33 @@ void Compiler::rename(Var& var, const char* name) {
} }
} }
// ============================================================================
// [asmjit::Compiler - Assembler]
// ============================================================================
Assembler* Compiler::getAssembler() {
Assembler* a = _assembler;
if (a != NULL) {
a->reset(false);
}
else {
a = _newAssembler();
_assembler = a;
}
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = _logger;
if (logger != NULL)
a->setLogger(logger);
#endif // !ASMJIT_DISABLE_LOGGER
a->setBaseAddress(_baseAddress);
a->setFeatures(_features);
return a;
}
} // asmjit namespace } // asmjit namespace
// [Api-End] // [Api-End]

View File

@@ -2023,6 +2023,7 @@ struct TargetNode : public Node {
ASMJIT_INLINE TargetNode(Compiler* compiler, uint32_t labelId) : Node(compiler, kNodeTypeTarget) { ASMJIT_INLINE TargetNode(Compiler* compiler, uint32_t labelId) : Node(compiler, kNodeTypeTarget) {
_id = labelId; _id = labelId;
_numRefs = 0; _numRefs = 0;
_offset = -1;
_from = NULL; _from = NULL;
} }
@@ -2058,6 +2059,14 @@ struct TargetNode : public Node {
//! Subtract number of jumps to this target. //! Subtract number of jumps to this target.
ASMJIT_INLINE void subNumRefs(uint32_t i = 1) { _numRefs -= i; } ASMJIT_INLINE void subNumRefs(uint32_t i = 1) { _numRefs -= i; }
//! Get the label offset.
//!
//! \note Only valid after the content has been serialized to the `Assembler`.
ASMJIT_INLINE intptr_t getOffset() const { return _offset; }
//! Set the label offset.
ASMJIT_INLINE void setOffset(intptr_t offset) { _offset = offset; }
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Members] // [Members]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
@@ -2067,6 +2076,8 @@ struct TargetNode : public Node {
//! Count of jumps here. //! Count of jumps here.
uint32_t _numRefs; uint32_t _numRefs;
//! Label offset, after serialization.
intptr_t _offset;
//! First jump instruction that points to this target (label). //! First jump instruction that points to this target (label).
JumpNode* _from; JumpNode* _from;
}; };
@@ -2454,16 +2465,15 @@ struct FuncNode : public Node {
//! Required stack alignment (usually for multimedia instructions). //! Required stack alignment (usually for multimedia instructions).
uint32_t _requiredStackAlignment; uint32_t _requiredStackAlignment;
//! The "Red Zone" suze - count of bytes which might be accessed //! The "Red Zone" size - count of bytes which might be accessed without
//! without adjusting the stack pointer. //! adjusting the stack pointer.
uint16_t _redZoneSize; uint16_t _redZoneSize;
//! Spill zone size (zone used by WIN64ABI). //! Spill zone size (zone used by WIN64ABI).
uint16_t _spillZoneSize; uint16_t _spillZoneSize;
//! Stack size needed for function arguments. //! Stack size needed for function arguments.
uint32_t _argStackSize; uint32_t _argStackSize;
//! Stack size needed for all variables and memory allocated on //! Stack size needed for all variables and memory allocated on the stack.
//! the stack.
uint32_t _memStackSize; uint32_t _memStackSize;
//! Stack size needed to call other functions. //! Stack size needed to call other functions.
uint32_t _callStackSize; uint32_t _callStackSize;
@@ -2683,9 +2693,14 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Get maximum look ahead. //! Get maximum look ahead.
ASMJIT_INLINE uint32_t getMaxLookAhead() const { return _maxLookAhead; } ASMJIT_INLINE uint32_t getMaxLookAhead() const {
return _maxLookAhead;
}
//! Set maximum look ahead to `val`. //! Set maximum look ahead to `val`.
ASMJIT_INLINE void setMaxLookAhead(uint32_t val) { _maxLookAhead = val; } ASMJIT_INLINE void setMaxLookAhead(uint32_t val) {
_maxLookAhead = val;
}
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Clear / Reset] // [Clear / Reset]
@@ -2787,9 +2802,9 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Get `TargetNode` by `id`. //! Get `TargetNode` by `id`.
ASMJIT_INLINE TargetNode* getTargetById(uint32_t id) { ASMJIT_INLINE TargetNode* getTargetById(uint32_t id) {
ASMJIT_ASSERT(OperandUtil::isLabelId(id)); ASMJIT_ASSERT(OperandUtil::isLabelId(id));
ASMJIT_ASSERT(id < _targets.getLength()); ASMJIT_ASSERT(id < _targetList.getLength());
return _targets[id]; return _targetList[id];
} }
//! Get `TargetNode` by `label`. //! Get `TargetNode` by `label`.
@@ -2803,12 +2818,43 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Get count of created labels. //! Get count of created labels.
ASMJIT_INLINE size_t getLabelsCount() const { ASMJIT_INLINE size_t getLabelsCount() const {
return _targets.getLength(); return _targetList.getLength();
} }
//! Get whether `label` is created. //! Get whether `label` is created.
ASMJIT_INLINE bool isLabelCreated(const Label& label) const { ASMJIT_INLINE bool isLabelValid(const Label& label) const {
return static_cast<size_t>(label.getId()) < _targets.getLength(); return isLabelValid(label.getId());
}
//! \overload
ASMJIT_INLINE bool isLabelValid(uint32_t id) const {
return static_cast<size_t>(id) < _targetList.getLength();
}
//! Get `TargetNode` by `label`.
ASMJIT_INLINE TargetNode* getTargetByLabel(const Label& label) {
return getTargetByLabel(label.getId());
}
//! \overload
ASMJIT_INLINE TargetNode* getTargetByLabel(uint32_t id) {
ASMJIT_ASSERT(isLabelValid(id));
return _targetList[id];
}
//! Get `label` offset or -1 if the label is not bound.
//!
//! This method can be only called after the code has been serialized to the
//! `Assembler`, otherwise the offset returned will be -1 (even if the label
//! has been bound).
ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const {
return getLabelOffset(label.getId());
}
//! \overload
ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const {
ASMJIT_ASSERT(isLabelValid(id));
return _targetList[id]->getOffset();
} }
//! \internal //! \internal
@@ -2826,7 +2872,7 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Bind label to the current offset. //! Bind label to the current offset.
//! //!
//! \note Label can be bound only once! //! \note Label can be bound only once!
ASMJIT_API void bind(const Label& label); ASMJIT_API Error bind(const Label& label);
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Embed] // [Embed]
@@ -2868,8 +2914,8 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Get whether variable `var` is created. //! Get whether variable `var` is created.
ASMJIT_INLINE bool isVarCreated(const Var& var) const { ASMJIT_INLINE bool isVarValid(const Var& var) const {
return static_cast<size_t>(var.getId() & kOperandIdNum) < _vars.getLength(); return static_cast<size_t>(var.getId() & kOperandIdNum) < _varList.getLength();
} }
//! \internal //! \internal
@@ -2884,16 +2930,16 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Get `VarData` by `id`. //! Get `VarData` by `id`.
ASMJIT_INLINE VarData* getVdById(uint32_t id) const { ASMJIT_INLINE VarData* getVdById(uint32_t id) const {
ASMJIT_ASSERT(id != kInvalidValue); ASMJIT_ASSERT(id != kInvalidValue);
ASMJIT_ASSERT(static_cast<size_t>(id & kOperandIdNum) < _vars.getLength()); ASMJIT_ASSERT(static_cast<size_t>(id & kOperandIdNum) < _varList.getLength());
return _vars[id & kOperandIdNum]; return _varList[id & kOperandIdNum];
} }
//! \internal //! \internal
//! //!
//! Get an array of 'VarData*'. //! Get an array of 'VarData*'.
ASMJIT_INLINE VarData** _getVdArray() const { ASMJIT_INLINE VarData** _getVdArray() const {
return const_cast<VarData**>(_vars.getData()); return const_cast<VarData**>(_varList.getData());
} }
//! \internal //! \internal
@@ -2950,17 +2996,36 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Put data to a constant-pool and get a memory reference to it. //! Put data to a constant-pool and get a memory reference to it.
virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0; virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0;
// --------------------------------------------------------------------------
// [Assembler]
// --------------------------------------------------------------------------
//! Get an assembler instance that is associated with the compiler.
//!
//! \note One instance of `Assembler` is shared and has lifetime same as the
//! compiler, however, each call to `getAssembler()` resets the assembler so
//! new code can be serialized into it.
ASMJIT_API Assembler* getAssembler();
//! \internal
//!
//! Create a new `Assembler` instance associated with the compiler.
virtual Assembler* _newAssembler() = 0;
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Serialize] // [Serialize]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Send assembled code to `assembler`. //! Serialize a compiled code to `assembler`.
virtual Error serialize(Assembler& assembler) = 0; virtual Error serialize(Assembler* assembler) = 0;
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Members] // [Members]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Internal assembler.
Assembler* _assembler;
//! Flow id added to each node created (used only by `Context)`. //! Flow id added to each node created (used only by `Context)`.
uint32_t _nodeFlowId; uint32_t _nodeFlowId;
//! Flags added to each node created (used only by `Context)`. //! Flags added to each node created (used only by `Context)`.
@@ -2990,10 +3055,10 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Local constant pool zone. //! Local constant pool zone.
Zone _localConstZone; Zone _localConstZone;
//! Targets. //! TargetNode list.
PodVector<TargetNode*> _targets; PodVector<TargetNode*> _targetList;
//! Variables. //! VarData list.
PodVector<VarData*> _vars; PodVector<VarData*> _varList;
//! Local constant pool, flushed at the end of each function. //! Local constant pool, flushed at the end of each function.
ConstPool _localConstPool; ConstPool _localConstPool;

View File

@@ -30,7 +30,7 @@
namespace asmjit { namespace asmjit {
// ============================================================================ // ============================================================================
// [asmjit::CpuInfo - DetectNumberOfCores] // [asmjit::CpuInfo - DetectHwThreadsCount]
// ============================================================================ // ============================================================================
uint32_t CpuInfo::detectHwThreadsCount() { uint32_t CpuInfo::detectHwThreadsCount() {

View File

@@ -102,7 +102,7 @@ struct CpuInfo {
// [Statics] // [Statics]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Detect number of cores (or sum of all cores of all processors). //! Detect the number of hardware threads.
static ASMJIT_API uint32_t detectHwThreadsCount(); static ASMJIT_API uint32_t detectHwThreadsCount();
//! Get host cpu. //! Get host cpu.

View File

@@ -44,11 +44,13 @@ static const char errorMessages[] = {
"No virtual memory\0" "No virtual memory\0"
"Invalid argument\0" "Invalid argument\0"
"Invalid state\0" "Invalid state\0"
"No code generated\0"
"Code too large\0"
"Label already bound\0"
"Unknown instruction\0" "Unknown instruction\0"
"Illegal instruction\0" "Illegal instruction\0"
"Illegal addressing\0" "Illegal addressing\0"
"Illegal displacement\0" "Illegal displacement\0"
"Invalid function\0"
"Overlapped arguments\0" "Overlapped arguments\0"
"Unknown error\0" "Unknown error\0"
}; };

View File

@@ -42,11 +42,25 @@ ASMJIT_ENUM(kError) {
//! Invalid state. //! Invalid state.
kErrorInvalidState = 4, kErrorInvalidState = 4,
//! Unknown instruction. This happens only if instruction code is //! No code generated.
//! out of bounds. Shouldn't happen. //!
kErrorUnknownInst = 5, //! Returned by runtime if the code-generator contains no code.
kErrorNoCodeGenerated = 5,
//! Illegal instruction (Assembler). //! Code generated is too large to fit in memory reserved.
//!
//! Returned by `StaticRuntime` in case that the code generated is too large
//! to fit in the memory already reserved for it.
kErrorCodeTooLarge = 6,
//! Label is already bound.
kErrorLabelAlreadyBound = 7,
//! Unknown instruction (an instruction ID is out of bounds or instruction
//! name is invalid).
kErrorUnknownInst = 8,
//! Illegal instruction.
//! //!
//! This status code can also be returned in X64 mode if AH, BH, CH or DH //! This status code can also be returned in X64 mode if AH, BH, CH or DH
//! registers have been used together with a REX prefix. The instruction //! registers have been used together with a REX prefix. The instruction
@@ -64,30 +78,25 @@ ASMJIT_ENUM(kError) {
//! ~~~ //! ~~~
//! //!
//! \note In debug mode assertion is raised instead of returning an error. //! \note In debug mode assertion is raised instead of returning an error.
kErrorIllegalInst = 6, kErrorIllegalInst = 9,
//! Illegal (unencodable) addressing used (Assembler). //! Illegal (unencodable) addressing used.
kErrorIllegalAddresing = 7, kErrorIllegalAddresing = 10,
//! Illegal (unencodable) displacement used (Assembler). //! Illegal (unencodable) displacement used.
//! //!
//! X86/X64 //! X86/X64
//! ------- //! -------
//! //!
//! Short form of jump instruction has been used, but the displacement is out //! Short form of jump instruction has been used, but the displacement is out
//! of bounds. //! of bounds.
kErrorIllegalDisplacement = 8, kErrorIllegalDisplacement = 11,
//! Invalid function (Compiler).
//!
//! Returned if no function is defined, but `make()` has been called.
kErrorInvalidFunction = 9,
//! A variable has been assigned more than once to a function argument (Compiler). //! A variable has been assigned more than once to a function argument (Compiler).
kErrorOverlappedArgs = 10, kErrorOverlappedArgs = 12,
//! Count of AsmJit status codes. Can grow in future. //! Count of AsmJit error codes.
kErrorCount = 11 kErrorCount = 13
}; };
// ============================================================================ // ============================================================================

View File

@@ -19,6 +19,20 @@ namespace asmjit {
//! \addtogroup asmjit_base_general //! \addtogroup asmjit_base_general
//! \{ //! \{
// ============================================================================
// [asmjit::Ptr / SignedPtr]
// ============================================================================
//! 64-bit unsigned pointer, compatible with JIT and non-JIT generators.
//!
//! This is the preferred pointer type to use with AsmJit library. It has a
//! capability to hold any pointer for any architecture making it an ideal
//! candidate for cross-platform code generation.
typedef uint64_t Ptr;
//! 64-bit signed pointer, like \ref Ptr, but made signed.
typedef int64_t SignedPtr;
// ============================================================================ // ============================================================================
// [asmjit::kGlobals] // [asmjit::kGlobals]
// ============================================================================ // ============================================================================
@@ -30,6 +44,9 @@ namespace asmjit {
//! string is not known and has to be determined. //! string is not known and has to be determined.
static const size_t kInvalidIndex = ~static_cast<size_t>(0); static const size_t kInvalidIndex = ~static_cast<size_t>(0);
//! Invalid base address.
static const Ptr kNoBaseAddress = static_cast<Ptr>(static_cast<SignedPtr>(-1));
//! Global constants. //! Global constants.
ASMJIT_ENUM(kGlobals) { ASMJIT_ENUM(kGlobals) {
//! Invalid value or operand id. //! Invalid value or operand id.
@@ -89,20 +106,6 @@ ASMJIT_ENUM(kArch) {
kArchHost64Bit = sizeof(intptr_t) >= 8 kArchHost64Bit = sizeof(intptr_t) >= 8
}; };
// ============================================================================
// [asmjit::Ptr / SignedPtr]
// ============================================================================
//! 64-bit unsigned pointer, compatible with JIT and non-JIT generators.
//!
//! This is the preferred pointer type to use with AsmJit library. It has a
//! capability to hold any pointer for any architecture making it an ideal
//! candidate for cross-platform code generation.
typedef uint64_t Ptr;
//! 64-bit signed pointer, like \ref Ptr, but made signed.
typedef int64_t SignedPtr;
//! \} //! \}
// ============================================================================ // ============================================================================

View File

@@ -22,39 +22,53 @@ namespace asmjit {
// [asmjit::Runtime - Construction / Destruction] // [asmjit::Runtime - Construction / Destruction]
// ============================================================================ // ============================================================================
Runtime::Runtime() {} Runtime::Runtime() {
_sizeLimit = 0;
_runtimeType = kRuntimeTypeNone;
_allocType = kVMemAllocFreeable;
::memset(_reserved, 0, sizeof(_reserved));
_baseAddress = kNoBaseAddress;
}
Runtime::~Runtime() {} Runtime::~Runtime() {}
// ============================================================================ // ============================================================================
// [asmjit::JitRuntime - Construction / Destruction] // [asmjit::HostRuntime - Construction / Destruction]
// ============================================================================ // ============================================================================
JitRuntime::JitRuntime() : HostRuntime::HostRuntime() {
_allocType(kVMemAllocFreeable) {} _runtimeType = kRuntimeTypeJit;
}
JitRuntime::~JitRuntime() {} HostRuntime::~HostRuntime() {}
// ============================================================================ // ============================================================================
// [asmjit::JitRuntime - Get] // [asmjit::HostRuntime - Interface]
// ============================================================================ // ============================================================================
uint32_t JitRuntime::getStackAlignment() { const CpuInfo* HostRuntime::getCpuInfo() {
return CpuInfo::getHost();
}
uint32_t HostRuntime::getStackAlignment() {
uint32_t alignment = sizeof(intptr_t); uint32_t alignment = sizeof(intptr_t);
#if defined(ASMJIT_HOST_X86) #if defined(ASMJIT_HOST_X86)
// Modern Linux, APPLE and UNIX guarantees 16-byte stack alignment, but I'm // Modern Linux, APPLE and UNIX guarantees 16-byte stack alignment, but I'm
// not sure about all other UNIX operating systems, because 16-byte alignment // not sure about all other UNIX operating systems, because 16-byte alignment
// is addition to an older specification. // is addition to an older specification.
#if (defined(__linux__) || \ # if (defined(__linux__) || \
defined(__linux) || \ defined(__linux) || \
defined(__unix__) || \ defined(__unix__) || \
defined(__FreeBSD__) || \ defined(__FreeBSD__) || \
defined(__NetBSD__) || \ defined(__NetBSD__) || \
defined(__OpenBSD__) || \ defined(__OpenBSD__) || \
defined(__DARWIN__) || \ defined(__DARWIN__) || \
defined(__APPLE__) ) defined(__APPLE__) )
alignment = 16; alignment = 16;
#endif # endif
#elif defined(ASMJIT_HOST_X64) #elif defined(ASMJIT_HOST_X64)
alignment = 16; alignment = 16;
#endif #endif
@@ -62,49 +76,7 @@ uint32_t JitRuntime::getStackAlignment() {
return alignment; return alignment;
} }
const CpuInfo* JitRuntime::getCpuInfo() { void HostRuntime::flush(void* p, size_t size) {
return CpuInfo::getHost();
}
// ============================================================================
// [asmjit::JitRuntime - Add]
// ============================================================================
Error JitRuntime::add(void** dst, Assembler* assembler) {
// Disallow empty code generation.
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
*dst = NULL;
return kErrorInvalidFunction;
}
void* p = _memMgr.alloc(codeSize, getAllocType());
if (p == NULL) {
*dst = NULL;
return kErrorNoVirtualMemory;
}
// Relocate the code.
size_t relocSize = assembler->relocCode(p);
// Return unused memory to `VMemMgr`.
if (relocSize < codeSize)
_memMgr.shrink(p, relocSize);
// Return the code.
*dst = p;
flush(p, relocSize);
return kErrorOk;
}
Error JitRuntime::release(void* p) {
return _memMgr.release(p);
}
void JitRuntime::flush(void* p, size_t size) {
// Only useful on non-x86 architectures. // Only useful on non-x86 architectures.
#if !defined(ASMJIT_HOST_X86) && !defined(ASMJIT_HOST_X64) #if !defined(ASMJIT_HOST_X86) && !defined(ASMJIT_HOST_X64)
@@ -116,6 +88,103 @@ void JitRuntime::flush(void* p, size_t size) {
#endif // !ASMJIT_HOST_X86 && !ASMJIT_HOST_X64 #endif // !ASMJIT_HOST_X86 && !ASMJIT_HOST_X64
} }
// ============================================================================
// [asmjit::StaticRuntime - Construction / Destruction]
// ============================================================================
StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) {
_sizeLimit = sizeLimit;
_baseAddress = static_cast<Ptr>((uintptr_t)baseAddress);
}
StaticRuntime::~StaticRuntime() {}
// ============================================================================
// [asmjit::StaticRuntime - Interface]
// ============================================================================
Error StaticRuntime::add(void** dst, Assembler* assembler) {
size_t codeSize = assembler->getCodeSize();
size_t sizeLimit = _sizeLimit;
if (codeSize == 0) {
*dst = NULL;
return kErrorNoCodeGenerated;
}
if (sizeLimit != 0 && sizeLimit < codeSize) {
*dst = NULL;
return kErrorCodeTooLarge;
}
Ptr baseAddress = _baseAddress;
uint8_t* p = static_cast<uint8_t*>((void*)static_cast<uintptr_t>(baseAddress));
// Since the base address is known the `relocSize` returned should be equal
// to `codeSize`. It's better to fail if they don't match instead of passsing
// silently.
size_t relocSize = assembler->relocCode(p, baseAddress);
if (relocSize == 0 || codeSize != relocSize) {
*dst = NULL;
return kErrorInvalidState;
}
_baseAddress += codeSize;
if (sizeLimit)
sizeLimit -= codeSize;
flush(p, codeSize);
*dst = p;
return kErrorOk;
}
Error StaticRuntime::release(void* p) {
// There is nothing to release as `StaticRuntime` doesn't manage any memory.
ASMJIT_UNUSED(p);
return kErrorOk;
}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
JitRuntime::JitRuntime() {}
JitRuntime::~JitRuntime() {}
// ============================================================================
// [asmjit::JitRuntime - Interface]
// ============================================================================
Error JitRuntime::add(void** dst, Assembler* assembler) {
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
*dst = NULL;
return kErrorNoCodeGenerated;
}
void* p = _memMgr.alloc(codeSize, getAllocType());
if (p == NULL) {
*dst = NULL;
return kErrorNoVirtualMemory;
}
// Relocate the code and release the unused memory back to `VMemMgr`.
size_t relocSize = assembler->relocCode(p);
if (relocSize < codeSize) {
_memMgr.shrink(p, relocSize);
}
flush(p, relocSize);
*dst = p;
return kErrorOk;
}
Error JitRuntime::release(void* p) {
return _memMgr.release(p);
}
} // asmjit namespace } // asmjit namespace
// [Api-End] // [Api-End]

View File

@@ -27,6 +27,17 @@ struct CpuInfo;
//! \addtogroup asmjit_base_general //! \addtogroup asmjit_base_general
//! \{ //! \{
// ============================================================================
// [asmjit::kRuntimeType]
// ============================================================================
ASMJIT_ENUM(kRuntimeType) {
kRuntimeTypeNone = 0,
kRuntimeTypeJit = 1,
kRuntimeTypeRemote = 2
};
// ============================================================================ // ============================================================================
// [asmjit::Runtime] // [asmjit::Runtime]
// ============================================================================ // ============================================================================
@@ -44,16 +55,37 @@ struct ASMJIT_VCLASS Runtime {
//! Destroy the `Runtime` instance. //! Destroy the `Runtime` instance.
ASMJIT_API virtual ~Runtime(); ASMJIT_API virtual ~Runtime();
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get runtime type.
ASMJIT_INLINE uint32_t getRuntimeType() const {
return _runtimeType;
}
//! Get whether the runtime has a base address.
//!
//! \sa \ref getBaseAddress()
ASMJIT_INLINE bool hasBaseAddress() const {
return _baseAddress == kNoBaseAddress;
}
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const {
return _baseAddress;
}
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Interface] // [Interface]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Get stack alignment of target runtime.
virtual uint32_t getStackAlignment() = 0;
//! Get CPU information. //! Get CPU information.
virtual const CpuInfo* getCpuInfo() = 0; virtual const CpuInfo* getCpuInfo() = 0;
//! Get stack alignment of target runtime.
virtual uint32_t getStackAlignment() = 0;
//! Allocate a memory needed for a code generated by `assembler` and //! Allocate a memory needed for a code generated by `assembler` and
//! relocate it to the target location. //! relocate it to the target location.
//! //!
@@ -64,6 +96,108 @@ struct ASMJIT_VCLASS Runtime {
//! Release memory allocated by `add`. //! Release memory allocated by `add`.
virtual Error release(void* p) = 0; virtual Error release(void* p) = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Maximum size of the code that can be added to the runtime (0=unlimited).
size_t _sizeLimit;
//! Base address (-1 means no base address).
Ptr _baseAddress;
//! Type of the runtime.
uint8_t _runtimeType;
//! Type of the allocation.
uint8_t _allocType;
//! \internal
uint8_t _reserved[sizeof(intptr_t) - 2];
};
// ============================================================================
// [asmjit::HostRuntime]
// ============================================================================
//! Base runtime for JIT code generation.
struct ASMJIT_VCLASS HostRuntime : public Runtime {
ASMJIT_NO_COPY(HostRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `HostRuntime` instance.
ASMJIT_API HostRuntime();
//! Destroy the `HostRuntime` instance.
ASMJIT_API virtual ~HostRuntime();
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual const CpuInfo* getCpuInfo();
ASMJIT_API virtual uint32_t getStackAlignment();
//! Flush an instruction cache.
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes a flush of the processor cache.
//!
//! Flushing is basically a NOP under X86/X64, but is needed by architectures
//! that do not have a transparent instruction cache.
//!
//! This function can also be overridden to improve compatibility with tools
//! such as Valgrind, however, it's not an official part of AsmJit.
ASMJIT_API virtual void flush(void* p, size_t size);
};
// ============================================================================
// [asmjit::StaticRuntime]
// ============================================================================
//! JIT static runtime.
//!
//! JIT static runtime can be used to generate code to a memory location that
//! is known.
struct ASMJIT_VCLASS StaticRuntime : public HostRuntime {
ASMJIT_NO_COPY(StaticRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `StaticRuntime` instance.
//!
//! The `address` specifies a fixed target address, which will be used as a
//! base address for relocation, and `sizeLimit` specified the maximum size
//! of a code that can be copied to it. If there is no limit `sizeLimit`
//! should be zero.
ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0);
//! Destroy the `StaticRuntime` instance.
ASMJIT_API virtual ~StaticRuntime();
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const {
return _baseAddress;
}
//! Get the maximum size of the code that can be relocated to the target
//! address or zero if unlimited.
ASMJIT_INLINE size_t getSizeLimit() const {
return _sizeLimit;
}
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler);
ASMJIT_API virtual Error release(void* p);
}; };
// ============================================================================ // ============================================================================
@@ -71,7 +205,7 @@ struct ASMJIT_VCLASS Runtime {
// ============================================================================ // ============================================================================
//! JIT runtime. //! JIT runtime.
struct ASMJIT_VCLASS JitRuntime : public Runtime { struct ASMJIT_VCLASS JitRuntime : public HostRuntime {
ASMJIT_NO_COPY(JitRuntime) ASMJIT_NO_COPY(JitRuntime)
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
@@ -87,16 +221,6 @@ struct ASMJIT_VCLASS JitRuntime : public Runtime {
// [Accessors] // [Accessors]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// Note: These members can be ignored by all derived classes. They are here
// only to privide default implementation. All other implementations (remote
// code patching or making dynamic loadable libraries/executables) ignore
// members accessed by these accessors.
//! Get the `VMemMgr` instance.
ASMJIT_INLINE VMemMgr* getMemMgr() const {
return const_cast<VMemMgr*>(&_memMgr);
}
//! Get the type of allocation. //! Get the type of allocation.
ASMJIT_INLINE uint32_t getAllocType() const { ASMJIT_INLINE uint32_t getAllocType() const {
return _allocType; return _allocType;
@@ -107,37 +231,24 @@ struct ASMJIT_VCLASS JitRuntime : public Runtime {
_allocType = allocType; _allocType = allocType;
} }
//! Get the virtual memory manager.
ASMJIT_INLINE VMemMgr* getMemMgr() const {
return const_cast<VMemMgr*>(&_memMgr);
}
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Interface] // [Interface]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
ASMJIT_API virtual uint32_t getStackAlignment();
ASMJIT_API virtual const CpuInfo* getCpuInfo();
ASMJIT_API virtual Error add(void** dst, Assembler* assembler); ASMJIT_API virtual Error add(void** dst, Assembler* assembler);
ASMJIT_API virtual Error release(void* p); ASMJIT_API virtual Error release(void* p);
//! Flush instruction cache.
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes to flush the processor cache so it will not use the old data.
//!
//! Flushing is basically a NOP under X86/X64, but is needed by architectures
//! not having a transparent cache.
//!
//! This function can also be overridden to improve compatibility with tools
//! like Valgrind, but this is not an official part of AsmJit.
ASMJIT_API virtual void flush(void* p, size_t size);
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Members] // [Members]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
//! Virtual memory manager. //! Virtual memory manager.
VMemMgr _memMgr; VMemMgr _memMgr;
//! Type of allocation.
uint32_t _allocType;
}; };
//! \} //! \}

View File

@@ -105,7 +105,7 @@ struct VMemMgr {
#if !defined(ASMJIT_OS_WINDOWS) #if !defined(ASMJIT_OS_WINDOWS)
//! Create a `VMemMgr` instance. //! Create a `VMemMgr` instance.
ASMJIT_API VMemMgr(); ASMJIT_API VMemMgr();
#else #else
//! Create a `VMemMgr` instance. //! Create a `VMemMgr` instance.
//! //!
//! \note When running on Windows it's possible to specify a `hProcess` to //! \note When running on Windows it's possible to specify a `hProcess` to

View File

@@ -263,12 +263,13 @@
// ASMJIT_TRACE is only used by sources and private headers. It's safe to make // ASMJIT_TRACE is only used by sources and private headers. It's safe to make
// it unavailable outside of AsmJit. // it unavailable outside of AsmJit.
#if defined(ASMJIT_EXPORTS) #if defined(ASMJIT_EXPORTS)
namespace asmjit { static inline int disabledTrace(...) {} }
# if defined(ASMJIT_TRACE) # if defined(ASMJIT_TRACE)
# define ASMJIT_TSEC(_Section_) _Section_ # define ASMJIT_TSEC(_Section_) _Section_
# define ASMJIT_TLOG(...) ::printf(__VA_ARGS__) # define ASMJIT_TLOG ::printf(__VA_ARGS__)
# else # else
# define ASMJIT_TSEC(_Section_) do {} while(0) # define ASMJIT_TSEC(_Section_) do {} while(0)
# define ASMJIT_TLOG(...) do {} while(0) # define ASMJIT_TLOG 0 && ::asmjit::disabledTrace
# endif // ASMJIT_TRACE # endif // ASMJIT_TRACE
#endif // ASMJIT_EXPORTS #endif // ASMJIT_EXPORTS
@@ -347,10 +348,15 @@ typedef unsigned __int64 uint64_t;
#if defined(ASMJIT_OS_WINDOWS) && !defined(ASMJIT_SUPRESS_WINDOWS_H) #if defined(ASMJIT_OS_WINDOWS) && !defined(ASMJIT_SUPRESS_WINDOWS_H)
# if !defined(WIN32_LEAN_AND_MEAN)
# define WIN32_LEAN_AND_MEAN
# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif // !WIN32_LEAN_AND_MEAN
# if !defined(NOMINMAX) # if !defined(NOMINMAX)
# define NOMINMAX # define NOMINMAX
# define ASMJIT_UNDEF_NOMINMAX # define ASMJIT_UNDEF_NOMINMAX
# endif # endif // !NOMINMAX
# include <windows.h> # include <windows.h>
@@ -359,6 +365,11 @@ typedef unsigned __int64 uint64_t;
# undef ASMJIT_UNDEF_NOMINMAX # undef ASMJIT_UNDEF_NOMINMAX
# endif # endif
# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN)
# undef WIN32_LEAN_AND_MEAN
# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif
#endif // ASMJIT_OS_WINDOWS && !ASMJIT_SUPRESS_WINDOWS_H #endif // ASMJIT_OS_WINDOWS && !ASMJIT_SUPRESS_WINDOWS_H
// ============================================================================ // ============================================================================

View File

@@ -11,7 +11,7 @@
// This file can be used to modify built-in features of AsmJit. AsmJit is by // This file can be used to modify built-in features of AsmJit. AsmJit is by
// default compiled only for host processor to enable JIT compilation. Both // default compiled only for host processor to enable JIT compilation. Both
// Assembler and Compiler code generators are compiled by default. // Assembler and Compiler code generators are compiled by default.
// //
// ASMJIT_BUILD_... flags can be defined to build additional backends that can // ASMJIT_BUILD_... flags can be defined to build additional backends that can
// be used for remote code generation. // be used for remote code generation.
// //

View File

@@ -4,6 +4,7 @@
// [License] // [License]
// Public Domain (Unlicense) // Public Domain (Unlicense)
// [Dependencies - Broken]
#include "./broken.h" #include "./broken.h"
// ============================================================================ // ============================================================================
@@ -125,30 +126,24 @@ static void BrokenAPI_runUnit(BrokenAPI::Unit* unit) {
static void BrokenAPI_runAll() { static void BrokenAPI_runAll() {
BrokenAPI::Unit* unit = _brokenGlobal._unitList; BrokenAPI::Unit* unit = _brokenGlobal._unitList;
if (unit != NULL) { bool hasUnits = unit != NULL;
size_t count = 0; size_t count = 0;
do { while (unit != NULL) {
if (BrokenAPI_canRun(unit)) { if (BrokenAPI_canRun(unit)) {
BrokenAPI_runUnit(unit); BrokenAPI_runUnit(unit);
count++; count++;
}
unit = unit->next;
} while (unit != NULL);
if (count) {
INFO("\nSuccess:");
INFO(" All tests passed!");
}
else {
INFO("\nWarning:");
INFO(" No units matched the filter!");
} }
unit = unit->next;
}
if (count) {
INFO("\nSuccess:");
INFO(" All tests passed!");
} }
else { else {
INFO("\nWarning:"); INFO("\nWarning:");
INFO(" No units defined!"); INFO(" No units %s!", hasUnits ? "matched the filter" : "defined");
} }
} }
@@ -159,7 +154,7 @@ static void BrokenAPI_listAll() {
INFO("Units:"); INFO("Units:");
do { do {
INFO(" %s", unit->name); INFO(" %s", unit->name);
unit = unit->next; unit = unit->next;
} while (unit != NULL); } while (unit != NULL);
} }
else { else {
@@ -192,11 +187,13 @@ void BrokenAPI::setOutputFile(FILE* file) {
global._file = file; global._file = file;
} }
void BrokenAPI::setContext(const char* file, int line) { int BrokenAPI::setContext(const char* file, int line) {
BrokenGlobal& global = _brokenGlobal; BrokenGlobal& global = _brokenGlobal;
global._currentFile = file; global._currentFile = file;
global._currentLine = line; global._currentLine = line;
return 1;
} }
int BrokenAPI::run(int argc, const char* argv[], int BrokenAPI::run(int argc, const char* argv[],
@@ -234,7 +231,7 @@ int BrokenAPI::run(int argc, const char* argv[],
return 0; return 0;
} }
void BrokenAPI::info(const char* fmt, ...) { int BrokenAPI::info(const char* fmt, ...) {
BrokenGlobal& global = _brokenGlobal; BrokenGlobal& global = _brokenGlobal;
FILE* dst = global.getFile(); FILE* dst = global.getFile();
@@ -253,9 +250,10 @@ void BrokenAPI::info(const char* fmt, ...) {
::fputs("\n", dst); ::fputs("\n", dst);
::fflush(dst); ::fflush(dst);
return 1;
} }
void BrokenAPI::fail(const char* fmt, va_list ap) { int BrokenAPI::fail(const char* fmt, va_list ap) {
BrokenGlobal& global = _brokenGlobal; BrokenGlobal& global = _brokenGlobal;
FILE* dst = global.getFile(); FILE* dst = global.getFile();
@@ -276,4 +274,5 @@ void BrokenAPI::fail(const char* fmt, va_list ap) {
::fflush(dst); ::fflush(dst);
::exit(1); ::exit(1);
return 1;
} }

View File

@@ -14,8 +14,8 @@
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
// If using Doxygen to document a source-code hide everything. Ideally this // Hide everything when using Doxygen. Ideally this can be protected by a macro,
// can be also done by a macro, but there is no global and widely used one. // but there is not globally and widely used one across multiple projects.
//! \internal //! \internal
//! \{ //! \{
@@ -25,10 +25,10 @@
// ============================================================================ // ============================================================================
struct BrokenAPI { struct BrokenAPI {
//! Test entry point. //! Entry point of a unit test defined by `UNIT` macro.
typedef void (*Entry)(void); typedef void (*Entry)(void);
//! Test unit. //! Test defined by `UNIT` macro.
struct Unit { struct Unit {
const char* name; const char* name;
Entry entry; Entry entry;
@@ -48,14 +48,18 @@ struct BrokenAPI {
} }
}; };
//! Register a new test (called automatically by `AutoUnit` and `UNIT`). //! Register a new unit test (called automatically by `AutoUnit` and `UNIT`).
static void add(Unit* unit); static void add(Unit* unit);
//! Set output file to `file`. //! Set output file to a `file`.
static void setOutputFile(FILE* file); static void setOutputFile(FILE* file);
//! Set the current context. //! Set the current context to `file` and `line`.
static void setContext(const char* file, int line); //!
//! This is called by `EXPECT` macro to set the correct `file` and `line`,
//! because `EXPECT` macro internally calls `expect()` function, which does
//! change the original file & line to non-interesting `broken.h`.
static int setContext(const char* file, int line);
//! Initialize `Broken` framework. //! Initialize `Broken` framework.
//! //!
@@ -64,29 +68,30 @@ struct BrokenAPI {
Entry onBeforeRun = (Entry)NULL, Entry onBeforeRun = (Entry)NULL,
Entry onAfterRun = (Entry)NULL); Entry onAfterRun = (Entry)NULL);
//! //! Used internally by `EXPECT` macro.
template<typename T> template<typename T>
static void expect(const T& exp, const char* fmt = NULL, ...) { static int expect(const T& exp, const char* fmt = NULL, ...) {
if (exp) if (exp)
return; return 1;
va_list ap; va_list ap;
va_start(ap, fmt); va_start(ap, fmt);
fail(fmt, ap); fail(fmt, ap);
va_end(ap); va_end(ap);
return 0;
} }
//! Log message, adds automatically new line if not present. //! Log message, adds automatically new line if not present.
static void info(const char* fmt, ...); static int info(const char* fmt, ...);
//! Called on `EXPECT()` failure. //! Called on `EXPECT()` failure.
static void fail(const char* fmt, va_list ap); static int fail(const char* fmt, va_list ap);
}; };
// ============================================================================ // ============================================================================
// [Broken - Macros] // [Broken - Macros]
// ============================================================================ // ============================================================================
//! Define a unit. //! Define a unit test.
//! //!
//! `_Name_` can only contain ASCII characters, numbers and underscore. It has //! `_Name_` can only contain ASCII characters, numbers and underscore. It has
//! the same rules as identifiers in C and C++. //! the same rules as identifiers in C and C++.
@@ -98,16 +103,15 @@ struct BrokenAPI {
\ \
static void unit_##_Name_##_entry(void) static void unit_##_Name_##_entry(void)
//! Informative message printed to stdout. //! #define INFO(...)
#define INFO(...) \ //!
::BrokenAPI::info(__VA_ARGS__) //! Informative message printed to `stdout`.
#define INFO ::BrokenAPI::setContext(__FILE__, __LINE__) && ::BrokenAPI::info
//! Expect `_Exp_` to be truthy, fail otherwise. //! #define INFO(_Exp_ [, _Format_ [, ...]])
#define EXPECT(...) \ //!
do { \ //! Expect `_Exp_` to be true or evaluates to true, fail otherwise.
::BrokenAPI::setContext(__FILE__, __LINE__); \ #define EXPECT ::BrokenAPI::setContext(__FILE__, __LINE__) && ::BrokenAPI::expect
::BrokenAPI::expect(__VA_ARGS__); \
} while(0)
//! \} //! \}

View File

@@ -105,6 +105,10 @@ static const uint8_t x86SegmentPrefix[8] = { 0x00, 0x26, 0x2E, 0x36, 0x3E, 0x64,
static const uint8_t x86OpCodePushSeg[8] = { 0x00, 0x06, 0x0E, 0x16, 0x1E, 0xA0, 0xA8 }; static const uint8_t x86OpCodePushSeg[8] = { 0x00, 0x06, 0x0E, 0x16, 0x1E, 0xA0, 0xA8 };
static const uint8_t x86OpCodePopSeg[8] = { 0x00, 0x07, 0x00, 0x17, 0x1F, 0xA1, 0xA9 }; static const uint8_t x86OpCodePopSeg[8] = { 0x00, 0x07, 0x00, 0x17, 0x1F, 0xA1, 0xA9 };
// ============================================================================
// [Utils]
// ============================================================================
//! Encode MODR/M. //! Encode MODR/M.
static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) { static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) {
return (m << 6) + (o << 3) + rm; return (m << 6) + (o << 3) + rm;
@@ -115,6 +119,13 @@ static ASMJIT_INLINE uint32_t x86EncodeSib(uint32_t s, uint32_t i, uint32_t b) {
return (s << 6) + (i << 3) + b; return (s << 6) + (i << 3) + b;
} }
//! Get whether the two pointers `a` and `b` can be encoded by using relative
//! displacement, which fits into a signed 32-bit integer.
static ASMJIT_INLINE bool x64IsRelative(Ptr a, Ptr b) {
SignedPtr diff = static_cast<SignedPtr>(a) - static_cast<SignedPtr>(b);
return IntUtil::isInt32(diff);
}
// ============================================================================ // ============================================================================
// [Macros] // [Macros]
// ============================================================================ // ============================================================================
@@ -291,76 +302,6 @@ Error X86Assembler::setArch(uint32_t arch) {
return kErrorInvalidArgument; return kErrorInvalidArgument;
} }
// ============================================================================
// [asmjit::X86Assembler - Label]
// ============================================================================
void X86Assembler::_bind(const Label& label) {
// Get label data based on label id.
uint32_t index = label.getId();
LabelData* data = getLabelDataById(index);
// Label can be bound only once.
ASMJIT_ASSERT(data->offset == -1);
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logFormat(kLoggerStyleLabel, "L%u:\n", index);
#endif // !ASMJIT_DISABLE_LOGGER
size_t pos = getOffset();
LabelLink* link = data->links;
LabelLink* prev = NULL;
while (link) {
intptr_t offset = link->offset;
if (link->relocId != -1) {
// If linked label points to RelocData then instead of writing relative
// displacement to assembler stream, we will write it to RelocData.
_relocData[link->relocId].data += static_cast<Ptr>(pos);
}
else {
// Not using relocId, this means that we overwriting real displacement
// in assembler stream.
int32_t patchedValue = static_cast<int32_t>(
static_cast<intptr_t>(pos) - offset + link->displacement);
// Size of the value we are going to patch. Only BYTE/DWORD is allowed.
uint32_t size = getByteAt(offset);
ASMJIT_ASSERT(size == 1 || size == 4);
if (size == 4) {
setInt32At(offset, patchedValue);
}
else { // if (size) == 1
if (IntUtil::isInt8(patchedValue))
setByteAt(offset, static_cast<uint8_t>(patchedValue & 0xFF));
else
setError(kErrorIllegalDisplacement);
}
}
prev = link->prev;
link = prev;
}
// Chain unused links.
link = data->links;
if (link) {
if (prev == NULL)
prev = link;
prev->prev = _unusedLinks;
_unusedLinks = link;
}
// Unlink label if it was linked.
data->offset = pos;
data->links = NULL;
}
// ============================================================================ // ============================================================================
// [asmjit::X86Assembler - Embed] // [asmjit::X86Assembler - Embed]
// ============================================================================ // ============================================================================
@@ -374,7 +315,7 @@ Error X86Assembler::embedLabel(const Label& op) {
uint8_t* cursor = getCursor(); uint8_t* cursor = getCursor();
LabelData* label = getLabelDataById(op.getId()); LabelData* label = getLabelData(op.getId());
RelocData reloc; RelocData reloc;
#if !defined(ASMJIT_DISABLE_LOGGER) #if !defined(ASMJIT_DISABLE_LOGGER)
@@ -398,12 +339,12 @@ Error X86Assembler::embedLabel(const Label& op) {
link->prev = (LabelLink*)label->links; link->prev = (LabelLink*)label->links;
link->offset = getOffset(); link->offset = getOffset();
link->displacement = 0; link->displacement = 0;
link->relocId = _relocData.getLength(); link->relocId = _relocList.getLength();
label->links = link; label->links = link;
} }
if (_relocData.append(reloc) != kErrorOk) if (_relocList.append(reloc) != kErrorOk)
return setError(kErrorNoHeapMemory); return setError(kErrorNoHeapMemory);
// Emit dummy intptr_t (4 or 8 bytes; depends on the address size). // Emit dummy intptr_t (4 or 8 bytes; depends on the address size).
@@ -420,7 +361,7 @@ Error X86Assembler::embedLabel(const Label& op) {
// [asmjit::X86Assembler - Align] // [asmjit::X86Assembler - Align]
// ============================================================================ // ============================================================================
Error X86Assembler::_align(uint32_t mode, uint32_t offset) { Error X86Assembler::align(uint32_t mode, uint32_t offset) {
#if !defined(ASMJIT_DISABLE_LOGGER) #if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger) if (_logger)
_logger->logFormat(kLoggerStyleDirective, _logger->logFormat(kLoggerStyleDirective,
@@ -443,7 +384,7 @@ Error X86Assembler::_align(uint32_t mode, uint32_t offset) {
if (mode == kAlignCode) { if (mode == kAlignCode) {
alignPattern = 0x90; alignPattern = 0x90;
if (IntUtil::hasBit(_features, kCodeGenOptimizedAlign)) { if (hasFeature(kCodeGenOptimizedAlign)) {
const X86CpuInfo* cpuInfo = static_cast<const X86CpuInfo*>(getRuntime()->getCpuInfo()); const X86CpuInfo* cpuInfo = static_cast<const X86CpuInfo*>(getRuntime()->getCpuInfo());
// NOPs optimized for Intel: // NOPs optimized for Intel:
@@ -534,7 +475,7 @@ Error X86Assembler::_align(uint32_t mode, uint32_t offset) {
// [asmjit::X86Assembler - Reloc] // [asmjit::X86Assembler - Reloc]
// ============================================================================ // ============================================================================
size_t X86Assembler::_relocCode(void* _dst, Ptr base) const { size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const {
uint32_t arch = getArch(); uint32_t arch = getArch();
uint8_t* dst = static_cast<uint8_t*>(_dst); uint8_t* dst = static_cast<uint8_t*>(_dst);
@@ -542,48 +483,49 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr base) const {
Logger* logger = getLogger(); Logger* logger = getLogger();
#endif // ASMJIT_DISABLE_LOGGER #endif // ASMJIT_DISABLE_LOGGER
size_t codeOffset = getOffset(); size_t minCodeSize = getOffset(); // Current offset is the minimum code size.
size_t codeSize = getCodeSize(); size_t maxCodeSize = getCodeSize(); // Includes all possible trampolines.
// We will copy the exact size of the generated code. Extra code for trampolines // We will copy the exact size of the generated code. Extra code for trampolines
// is generated on-the-fly by the relocator (this code doesn't exist at the moment). // is generated on-the-fly by the relocator (this code doesn't exist at the moment).
::memcpy(dst, _buffer, codeOffset); ::memcpy(dst, _buffer, minCodeSize);
// Trampoline pointer. // Trampoline pointer.
uint8_t* tramp = dst + codeOffset; uint8_t* tramp = dst + minCodeSize;
// Relocate all recorded locations. // Relocate all recorded locations.
size_t relocIndex; size_t relocCount = _relocList.getLength();
size_t relocCount = _relocData.getLength(); const RelocData* relocData = _relocList.getData();
const RelocData* relocData = _relocData.getData();
for (relocIndex = 0; relocIndex < relocCount; relocIndex++) { for (size_t i = 0; i < relocCount; i++) {
const RelocData& r = relocData[relocIndex]; const RelocData& r = relocData[i];
Ptr ptr;
// Make sure that the `RelocData` is correct.
Ptr ptr = r.data;
size_t offset = static_cast<size_t>(r.from);
ASMJIT_ASSERT(offset + r.size <= static_cast<Ptr>(maxCodeSize));
// Whether to use trampoline, can be only used if relocation type is // Whether to use trampoline, can be only used if relocation type is
// kRelocAbsToRel on 64-bit. // kRelocAbsToRel on 64-bit.
bool useTrampoline = false; bool useTrampoline = false;
// Be sure that reloc data structure is correct.
size_t offset = static_cast<size_t>(r.from);
ASMJIT_ASSERT(offset + r.size <= static_cast<Ptr>(codeSize));
switch (r.type) { switch (r.type) {
case kRelocAbsToAbs: case kRelocAbsToAbs:
ptr = r.data;
break; break;
case kRelocRelToAbs: case kRelocRelToAbs:
ptr = r.data + base; ptr += baseAddress;
break; break;
case kRelocAbsToRel: case kRelocAbsToRel:
case kRelocTrampoline: ptr -= baseAddress + r.from + 4;
ptr = r.data - (base + r.from + 4); break;
if (arch == kArchX64 && r.type == kRelocTrampoline && !IntUtil::isInt32(ptr)) { case kRelocTrampoline:
ptr = (Ptr)tramp - (base + r.from + 4); ptr -= baseAddress + r.from + 4;
if (!IntUtil::isInt32(static_cast<SignedPtr>(ptr))) {
ptr = (Ptr)tramp - (baseAddress + r.from + 4);
useTrampoline = true; useTrampoline = true;
} }
break; break;
@@ -593,26 +535,30 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr base) const {
} }
switch (r.size) { switch (r.size) {
case 4: *reinterpret_cast<int32_t*>(dst + offset) = static_cast<int32_t>(ptr); break; case 8:
case 8: *reinterpret_cast<int64_t*>(dst + offset) = static_cast<int64_t>(ptr); break; *reinterpret_cast<int64_t*>(dst + offset) = static_cast<int64_t>(ptr);
break;
case 4:
*reinterpret_cast<int32_t*>(dst + offset) = static_cast<int32_t>(static_cast<SignedPtr>(ptr));
break;
default: default:
ASMJIT_ASSERT(!"Reached"); ASMJIT_ASSERT(!"Reached");
} }
// Patch `jmp/call` to use trampoline. // Handle the case where trampoline has been used.
if (arch == kArchX64 && useTrampoline) { if (useTrampoline) {
// Bytes that replace [REX, OPCODE] bytes.
uint32_t byte0 = 0xFF; uint32_t byte0 = 0xFF;
uint32_t byte1 = dst[offset - 1]; uint32_t byte1 = dst[offset - 1];
if (byte1 == 0xE8) { // Call, patch to FF/2 (-> 0x15).
// Call, path to FF/2 (-> 0x15). if (byte1 == 0xE8)
byte1 = x86EncodeMod(0, 2, 5); byte1 = x86EncodeMod(0, 2, 5);
} // Jmp, patch to FF/4 (-> 0x25).
else if (byte1 == 0xE9) { else if (byte1 == 0xE9)
// Jmp, path to FF/4 (-> 0x25).
byte1 = x86EncodeMod(0, 4, 5); byte1 = x86EncodeMod(0, 4, 5);
}
// Patch `jmp/call` instruction. // Patch `jmp/call` instruction.
ASMJIT_ASSERT(offset >= 2); ASMJIT_ASSERT(offset >= 2);
@@ -635,7 +581,7 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr base) const {
if (arch == kArchX64) if (arch == kArchX64)
return (size_t)(tramp - dst); return (size_t)(tramp - dst);
else else
return (size_t)(codeOffset); return (size_t)(minCodeSize);
} }
// ============================================================================ // ============================================================================
@@ -993,7 +939,7 @@ static Error ASMJIT_CDECL X86Assembler_emit(Assembler* self_, uint32_t code, con
uint8_t* cursor = self->getCursor(); uint8_t* cursor = self->getCursor();
uint32_t encoded = o0->getOp() + (o1->getOp() << 3) + (o2->getOp() << 6); uint32_t encoded = o0->getOp() + (o1->getOp() << 3) + (o2->getOp() << 6);
uint32_t options = self->getOptionsAndReset(); uint32_t options = self->getInstOptionsAndReset();
// Invalid instruction. // Invalid instruction.
if (code >= _kX86InstIdCount) { if (code >= _kX86InstIdCount) {
@@ -1355,11 +1301,11 @@ _Prepare:
if (encoded == ENC_OPS(Imm, None, None)) { if (encoded == ENC_OPS(Imm, None, None)) {
imVal = static_cast<const Imm*>(o0)->getInt64(); imVal = static_cast<const Imm*>(o0)->getInt64();
goto _EmitJmpOrCallImm; goto _EmitJmpOrCallAbs;
} }
if (encoded == ENC_OPS(Label, None, None)) { if (encoded == ENC_OPS(Label, None, None)) {
label = self->getLabelDataById(static_cast<const Label*>(o0)->getId()); label = self->getLabelData(static_cast<const Label*>(o0)->getId());
if (label->offset != -1) { if (label->offset != -1) {
// Bound label. // Bound label.
static const intptr_t kRel32Size = 5; static const intptr_t kRel32Size = 5;
@@ -1529,9 +1475,9 @@ _Prepare:
case kX86InstGroupX86Jcc: case kX86InstGroupX86Jcc:
if (encoded == ENC_OPS(Label, None, None)) { if (encoded == ENC_OPS(Label, None, None)) {
label = self->getLabelDataById(static_cast<const Label*>(o0)->getId()); label = self->getLabelData(static_cast<const Label*>(o0)->getId());
if (IntUtil::hasBit(self->_features, kCodeGenPredictedJumps)) { if (self->hasFeature(kCodeGenPredictedJumps)) {
if (options & kInstOptionTaken) if (options & kInstOptionTaken)
EMIT_BYTE(0x3E); EMIT_BYTE(0x3E);
if (options & kInstOptionNotTaken) if (options & kInstOptionNotTaken)
@@ -1593,7 +1539,7 @@ _Prepare:
} }
EMIT_BYTE(0xE3); EMIT_BYTE(0xE3);
label = self->getLabelDataById(static_cast<const Label*>(o1)->getId()); label = self->getLabelData(static_cast<const Label*>(o1)->getId());
if (label->offset != -1) { if (label->offset != -1) {
// Bound label. // Bound label.
@@ -1630,11 +1576,11 @@ _Prepare:
if (encoded == ENC_OPS(Imm, None, None)) { if (encoded == ENC_OPS(Imm, None, None)) {
imVal = static_cast<const Imm*>(o0)->getInt64(); imVal = static_cast<const Imm*>(o0)->getInt64();
goto _EmitJmpOrCallImm; goto _EmitJmpOrCallAbs;
} }
if (encoded == ENC_OPS(Label, None, None)) { if (encoded == ENC_OPS(Label, None, None)) {
label = self->getLabelDataById(static_cast<const Label*>(o0)->getId()); label = self->getLabelData(static_cast<const Label*>(o0)->getId());
if (label->offset != -1) { if (label->offset != -1) {
// Bound label. // Bound label.
const intptr_t kRel8Size = 2; const intptr_t kRel8Size = 2;
@@ -3706,8 +3652,8 @@ _EmitSib:
if (rmMem->getMemType() == kMemTypeLabel) { if (rmMem->getMemType() == kMemTypeLabel) {
// Relative->Absolute [x86 mode]. // Relative->Absolute [x86 mode].
label = self->getLabelDataById(rmMem->_vmem.base); label = self->getLabelData(rmMem->_vmem.base);
relocId = self->_relocData.getLength(); relocId = self->_relocList.getLength();
RelocData reloc; RelocData reloc;
reloc.type = kRelocRelToAbs; reloc.type = kRelocRelToAbs;
@@ -3715,7 +3661,7 @@ _EmitSib:
reloc.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer)); reloc.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer));
reloc.data = static_cast<SignedPtr>(dispOffset); reloc.data = static_cast<SignedPtr>(dispOffset);
if (self->_relocData.append(reloc) != kErrorOk) if (self->_relocList.append(reloc) != kErrorOk)
return self->setError(kErrorNoHeapMemory); return self->setError(kErrorNoHeapMemory);
if (label->offset != -1) { if (label->offset != -1) {
@@ -3738,7 +3684,7 @@ _EmitSib:
else /* if (Arch === kArchX64) */ { else /* if (Arch === kArchX64) */ {
if (rmMem->getMemType() == kMemTypeLabel) { if (rmMem->getMemType() == kMemTypeLabel) {
// [RIP + Disp32]. // [RIP + Disp32].
label = self->getLabelDataById(rmMem->_vmem.base); label = self->getLabelData(rmMem->_vmem.base);
// Indexing is invalid. // Indexing is invalid.
if (mIndex < kInvalidReg) if (mIndex < kInvalidReg)
@@ -3976,8 +3922,8 @@ _EmitAvxV:
goto _IllegalAddr; goto _IllegalAddr;
// Relative->Absolute [x86 mode]. // Relative->Absolute [x86 mode].
label = self->getLabelDataById(rmMem->_vmem.base); label = self->getLabelData(rmMem->_vmem.base);
relocId = self->_relocData.getLength(); relocId = self->_relocList.getLength();
RelocData reloc; RelocData reloc;
reloc.type = kRelocRelToAbs; reloc.type = kRelocRelToAbs;
@@ -3985,7 +3931,7 @@ _EmitAvxV:
reloc.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer)); reloc.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer));
reloc.data = static_cast<SignedPtr>(dispOffset); reloc.data = static_cast<SignedPtr>(dispOffset);
if (self->_relocData.append(reloc) != kErrorOk) if (self->_relocList.append(reloc) != kErrorOk)
return self->setError(kErrorNoHeapMemory); return self->setError(kErrorNoHeapMemory);
if (label->offset != -1) { if (label->offset != -1) {
@@ -4090,43 +4036,55 @@ _EmitXopM:
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// 64-bit mode requires a trampoline if a relative displacement doesn't fit // 64-bit mode requires a trampoline if a relative displacement doesn't fit
// into 32-bit integer. Old version of AsmJit used to emit jump to a section // into a 32-bit address. Old version of AsmJit used to emit jump to a section
// which contained another jump followed by an address (it worked well for // which contained another jump followed by an address (it worked well for
// both `jmp` and `call`), but it required to reserve 14-bytes for a possible // both `jmp` and `call`), but it required to reserve 14-bytes for a possible
// trampoline. // trampoline.
// //
// Instead of using 5-byte `jmp/call` and reserving 14 bytes required by the // Instead of using 5-byte `jmp/call` and reserving 14 bytes required by the
// trampoline, it's better to use 6-byte `jmp/call` (prefixing it with REX // trampoline, it's better to use 6-byte `jmp/call` (prefixing it with REX
// prefix) and to patch the `jmp/call` instruction itself. // prefix) and to patch the `jmp/call` instruction to read the address from
_EmitJmpOrCallImm: // a memory in case the trampoline is needed.
//
_EmitJmpOrCallAbs:
{ {
// Emit REX prefix (64-bit).
//
// Does nothing, but allows to path the instruction in case a trampoline is
// needed.
if (Arch == kArchX64) {
EMIT_OP(0x40);
}
// Both `jmp` and `call` instructions have a single-byte opcode.
EMIT_OP(opCode);
RelocData rd; RelocData rd;
rd.type = kRelocTrampoline; rd.type = kRelocAbsToRel;
rd.size = 4; rd.size = 4;
rd.from = (intptr_t)(cursor - self->_buffer); rd.from = (intptr_t)(cursor - self->_buffer) + 1;
rd.data = static_cast<SignedPtr>(imVal); rd.data = static_cast<SignedPtr>(imVal);
if (self->_relocData.append(rd) != kErrorOk) uint32_t trampolineSize = 0;
return self->setError(kErrorNoHeapMemory);
// Emit dummy 32-bit integer; will be overwritten by `relocCode()`. if (Arch == kArchX64) {
Ptr baseAddress = self->getBaseAddress();
Ptr diff = rd.data - (baseAddress + rd.from + 4);
// If the base address of the output is known, it's possible to determine
// the need for a trampoline here. This saves possible REX prefix in
// 64-bit mode and prevents reserving space needed for an absolute address.
if (baseAddress == kNoBaseAddress || !x64IsRelative(rd.data, baseAddress + rd.from + 4)) {
// Emit REX prefix so the instruction can be patched later on. The REX
// prefix does nothing if not patched after, but allows to patch the
// instruction in case where the trampoline is needed.
rd.type = kRelocTrampoline;
rd.from++;
EMIT_OP(0x40);
trampolineSize = 8;
}
}
// Both `jmp` and `call` instructions have a single-byte opcode and are
// followed by a 32-bit displacement.
EMIT_OP(opCode);
EMIT_DWORD(0); EMIT_DWORD(0);
// Trampoline has to be reserved, even if it's not used. if (self->_relocList.append(rd) != kErrorOk)
if (Arch == kArchX64) { return self->setError(kErrorNoHeapMemory);
self->_trampolineSize += 8;
} // Reserve space for a possible trampoline.
self->_trampolineSize += trampolineSize;
} }
goto _EmitDone; goto _EmitDone;

View File

@@ -29,43 +29,43 @@ namespace asmjit {
#define ASMJIT_X86_EMIT_OPTIONS(_Class_) \ #define ASMJIT_X86_EMIT_OPTIONS(_Class_) \
/*! Force short form of jmp/jcc instruction. */ \ /*! Force short form of jmp/jcc instruction. */ \
ASMJIT_INLINE _Class_& short_() { \ ASMJIT_INLINE _Class_& short_() { \
_options |= kInstOptionShortForm; \ _instOptions |= kInstOptionShortForm; \
return *this; \ return *this; \
} \ } \
\ \
/*! Force long form of jmp/jcc instruction. */ \ /*! Force long form of jmp/jcc instruction. */ \
ASMJIT_INLINE _Class_& long_() { \ ASMJIT_INLINE _Class_& long_() { \
_options |= kInstOptionLongForm; \ _instOptions |= kInstOptionLongForm; \
return *this; \ return *this; \
} \ } \
\ \
/*! Condition is likely to be taken (has only benefit on P4). */ \ /*! Condition is likely to be taken (has only benefit on P4). */ \
ASMJIT_INLINE _Class_& taken() { \ ASMJIT_INLINE _Class_& taken() { \
_options |= kInstOptionTaken; \ _instOptions |= kInstOptionTaken; \
return *this; \ return *this; \
} \ } \
\ \
/*! Condition is unlikely to be taken (has only benefit on P4). */ \ /*! Condition is unlikely to be taken (has only benefit on P4). */ \
ASMJIT_INLINE _Class_& notTaken() { \ ASMJIT_INLINE _Class_& notTaken() { \
_options |= kInstOptionNotTaken; \ _instOptions |= kInstOptionNotTaken; \
return *this; \ return *this; \
} \ } \
\ \
/*! Use LOCK prefix. */ \ /*! Use LOCK prefix. */ \
ASMJIT_INLINE _Class_& lock() { \ ASMJIT_INLINE _Class_& lock() { \
_options |= kX86InstOptionLock; \ _instOptions |= kX86InstOptionLock; \
return *this; \ return *this; \
} \ } \
\ \
/*! Force REX prefix. */ \ /*! Force REX prefix. */ \
ASMJIT_INLINE _Class_& rex() { \ ASMJIT_INLINE _Class_& rex() { \
_options |= kX86InstOptionRex; \ _instOptions |= kX86InstOptionRex; \
return *this; \ return *this; \
} \ } \
\ \
/*! Force 3-byte VEX prefix. */ \ /*! Force 3-byte VEX prefix. */ \
ASMJIT_INLINE _Class_& vex3() { \ ASMJIT_INLINE _Class_& vex3() { \
_options |= kX86InstOptionVex3; \ _instOptions |= kX86InstOptionVex3; \
return *this; \ return *this; \
} }
@@ -377,12 +377,6 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler {
ASMJIT_API Error setArch(uint32_t arch); ASMJIT_API Error setArch(uint32_t arch);
// --------------------------------------------------------------------------
// [Label]
// --------------------------------------------------------------------------
ASMJIT_API virtual void _bind(const Label& label);
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Embed] // [Embed]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
@@ -439,13 +433,13 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler {
// [Align] // [Align]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
ASMJIT_API virtual Error _align(uint32_t mode, uint32_t offset); ASMJIT_API virtual Error align(uint32_t mode, uint32_t offset);
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Reloc] // [Reloc]
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
ASMJIT_API virtual size_t _relocCode(void* dst, Ptr base) const; ASMJIT_API virtual size_t _relocCode(void* dst, Ptr baseAddress) const;
// -------------------------------------------------------------------------- // --------------------------------------------------------------------------
// [Emit] // [Emit]

View File

@@ -814,7 +814,7 @@ InstNode* X86Compiler::newInst(uint32_t code) {
if (inst == NULL) if (inst == NULL)
goto _NoMemory; goto _NoMemory;
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), NULL, 0); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), NULL, 0);
_NoMemory: _NoMemory:
setError(kErrorNoHeapMemory); setError(kErrorNoHeapMemory);
@@ -832,7 +832,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0) {
Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size); Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size);
opList[0] = o0; opList[0] = o0;
ASMJIT_ASSERT_UNINITIALIZED(o0); ASMJIT_ASSERT_UNINITIALIZED(o0);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 1); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 1);
} }
_NoMemory: _NoMemory:
@@ -853,7 +853,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
opList[1] = o1; opList[1] = o1;
ASMJIT_ASSERT_UNINITIALIZED(o0); ASMJIT_ASSERT_UNINITIALIZED(o0);
ASMJIT_ASSERT_UNINITIALIZED(o1); ASMJIT_ASSERT_UNINITIALIZED(o1);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 2); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 2);
} }
_NoMemory: _NoMemory:
@@ -876,7 +876,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
ASMJIT_ASSERT_UNINITIALIZED(o0); ASMJIT_ASSERT_UNINITIALIZED(o0);
ASMJIT_ASSERT_UNINITIALIZED(o1); ASMJIT_ASSERT_UNINITIALIZED(o1);
ASMJIT_ASSERT_UNINITIALIZED(o2); ASMJIT_ASSERT_UNINITIALIZED(o2);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 3); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 3);
} }
_NoMemory: _NoMemory:
@@ -901,7 +901,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
ASMJIT_ASSERT_UNINITIALIZED(o1); ASMJIT_ASSERT_UNINITIALIZED(o1);
ASMJIT_ASSERT_UNINITIALIZED(o2); ASMJIT_ASSERT_UNINITIALIZED(o2);
ASMJIT_ASSERT_UNINITIALIZED(o3); ASMJIT_ASSERT_UNINITIALIZED(o3);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 4); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 4);
} }
_NoMemory: _NoMemory:
@@ -928,7 +928,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
ASMJIT_ASSERT_UNINITIALIZED(o2); ASMJIT_ASSERT_UNINITIALIZED(o2);
ASMJIT_ASSERT_UNINITIALIZED(o3); ASMJIT_ASSERT_UNINITIALIZED(o3);
ASMJIT_ASSERT_UNINITIALIZED(o4); ASMJIT_ASSERT_UNINITIALIZED(o4);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 5); return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 5);
} }
_NoMemory: _NoMemory:
@@ -1189,7 +1189,7 @@ Error X86Compiler::setArg(uint32_t argIndex, Var& var) {
if (func == NULL) if (func == NULL)
return kErrorInvalidArgument; return kErrorInvalidArgument;
if (!isVarCreated(var)) if (!isVarValid(var))
return kErrorInvalidState; return kErrorInvalidState;
VarData* vd = getVd(var); VarData* vd = getVd(var);
@@ -1293,45 +1293,38 @@ _OnError:
// ============================================================================ // ============================================================================
void* X86Compiler::make() { void* X86Compiler::make() {
// Flush global constant pool Assembler* assembler = getAssembler();
X86Compiler_emitConstPool(this, _globalConstPoolLabel, _globalConstPool); if (assembler == NULL) {
setError(kErrorNoHeapMemory);
X86Assembler assembler(_runtime, _arch);
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = _logger;
if (logger)
assembler.setLogger(logger);
#endif // !ASMJIT_DISABLE_LOGGER
assembler._features = _features;
if (serialize(assembler) != kErrorOk)
return NULL;
if (assembler.getError() != kErrorOk) {
setError(assembler.getError());
return NULL; return NULL;
} }
void* result = assembler.make(); Error error = serialize(assembler);
if (error != kErrorOk) {
#if !defined(ASMJIT_DISABLE_LOGGER) setError(error);
if (logger) return NULL;
logger->logFormat(kLoggerStyleComment, }
"*** COMPILER SUCCESS - Wrote %u bytes, code: %u, trampolines: %u.\n\n",
static_cast<unsigned int>(assembler.getCodeSize()),
static_cast<unsigned int>(assembler.getOffset()),
static_cast<unsigned int>(assembler.getTrampolineSize()));
#endif // !ASMJIT_DISABLE_LOGGER
void* result = assembler->make();
return result; return result;
} }
// ============================================================================ // ============================================================================
// [asmjit::X86Compiler - Assemble] // [asmjit::X86Compiler - Assembler]
// ============================================================================ // ============================================================================
Error X86Compiler::serialize(Assembler& assembler) { Assembler* X86Compiler::_newAssembler() {
return new(std::nothrow) X86Assembler(_runtime, _arch);
}
// ============================================================================
// [asmjit::X86Compiler - Serialize]
// ============================================================================
Error X86Compiler::serialize(Assembler* assembler) {
// Flush the global constant pool.
X86Compiler_emitConstPool(this, _globalConstPoolLabel, _globalConstPool);
if (_firstNode == NULL) if (_firstNode == NULL)
return kErrorOk; return kErrorOk;
@@ -1357,7 +1350,7 @@ Error X86Compiler::serialize(Assembler& assembler) {
node = node->getNext(); node = node->getNext();
} while (node != NULL && node->getType() != kNodeTypeFunc); } while (node != NULL && node->getType() != kNodeTypeFunc);
error = context.serialize(&assembler, start, node); error = context.serialize(assembler, start, node);
if (error != kErrorOk) if (error != kErrorOk)
goto _Error; goto _Error;
context.cleanup(); context.cleanup();

View File

@@ -37,7 +37,7 @@ struct X86VarState;
// ============================================================================ // ============================================================================
//! X86/X64 variable type. //! X86/X64 variable type.
ASMJIT_ENUM(k86VarType) { ASMJIT_ENUM(kX86VarType) {
//! Variable is SP-FP (x87). //! Variable is SP-FP (x87).
kX86VarTypeFp32 = kVarTypeFp32, kX86VarTypeFp32 = kVarTypeFp32,
//! Variable is DP-FP (x87). //! Variable is DP-FP (x87).
@@ -1547,7 +1547,7 @@ ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm);
//! //!
//! // Final step - generate code. asmjit::Compiler::serialize() will send all //! // Final step - generate code. asmjit::Compiler::serialize() will send all
//! // instructions into Assembler and this ensures generating real machine code. //! // instructions into Assembler and this ensures generating real machine code.
//! c.serialize(a); //! c.serialize(&a);
//! //!
//! // Your function //! // Your function
//! void* fn = a.make(); //! void* fn = a.make();
@@ -2401,11 +2401,17 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler {
ASMJIT_API virtual void* make(); ASMJIT_API virtual void* make();
// -------------------------------------------------------------------------
// [Assembler]
// -------------------------------------------------------------------------
ASMJIT_API virtual Assembler* _newAssembler();
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// [Serialize] // [Serialize]
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
ASMJIT_API virtual Error serialize(Assembler& assembler); ASMJIT_API virtual Error serialize(Assembler* assembler);
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// [Options] // [Options]

View File

@@ -891,14 +891,14 @@ void X86Context::emitPopSequence(uint32_t regs) {
if (regs == 0) if (regs == 0)
return; return;
int32_t i = static_cast<int32_t>(_regCount.getGp()) - 1; uint32_t i = static_cast<int32_t>(_regCount.getGp());
uint32_t mask = 0x1 << static_cast<uint32_t>(i); uint32_t mask = 0x1 << static_cast<uint32_t>(i - 1);
X86GpReg gpReg(_zsp); X86GpReg gpReg(_zsp);
while (i >= 0) { while (i) {
i--;
if ((regs & mask) != 0) if ((regs & mask) != 0)
compiler->emit(kX86InstIdPop, gpReg.setIndex(i)); compiler->emit(kX86InstIdPop, gpReg.setIndex(i));
i--;
mask >>= 1; mask >>= 1;
} }
} }
@@ -931,7 +931,7 @@ void X86Context::emitConvertVarToVar(uint32_t dstType, uint32_t dstIndex, uint32
case kX86VarTypeXmmSs: case kX86VarTypeXmmSs:
if (srcType == kX86VarTypeXmmSd || srcType == kX86VarTypeXmmPd || srcType == kX86VarTypeYmmPd) { if (srcType == kX86VarTypeXmmSd || srcType == kX86VarTypeXmmPd || srcType == kX86VarTypeYmmPd) {
compiler->emit(kX86InstIdCvtsd2ss, x86::xmm(dstIndex), x86::xmm(srcIndex)); compiler->emit(kX86InstIdCvtsd2ss, x86::xmm(dstIndex), x86::xmm(srcIndex));
break; return;
} }
if (IntUtil::inInterval<uint32_t>(srcType, _kVarTypeIntStart, _kVarTypeIntEnd)) { if (IntUtil::inInterval<uint32_t>(srcType, _kVarTypeIntStart, _kVarTypeIntEnd)) {
@@ -973,8 +973,7 @@ void X86Context::emitMoveVarOnStack(
X86Compiler* compiler = getCompiler(); X86Compiler* compiler = getCompiler();
X86Mem m0(*dst); X86Mem m0(*dst);
X86Reg r0; X86Reg r0, r1;
X86Reg r1;
uint32_t regSize = compiler->getRegSize(); uint32_t regSize = compiler->getRegSize();
uint32_t instCode; uint32_t instCode;
@@ -1271,14 +1270,12 @@ void X86Context::emitMoveImmOnStack(uint32_t dstType, const X86Mem* dst, const I
case kVarTypeInt8: case kVarTypeInt8:
case kVarTypeUInt8: case kVarTypeUInt8:
imm.truncateTo8Bits(); imm.truncateTo8Bits();
compiler->emit(kX86InstIdMov, mem, imm); goto _Move32;
break;
case kVarTypeInt16: case kVarTypeInt16:
case kVarTypeUInt16: case kVarTypeUInt16:
imm.truncateTo16Bits(); imm.truncateTo16Bits();
compiler->emit(kX86InstIdMov, mem, imm); goto _Move32;
break;
case kVarTypeInt32: case kVarTypeInt32:
case kVarTypeUInt32: case kVarTypeUInt32:
@@ -1294,13 +1291,11 @@ _Move64:
uint32_t hi = imm.getUInt32Hi(); uint32_t hi = imm.getUInt32Hi();
// Lo-Part. // Lo-Part.
imm.truncateTo32Bits(); compiler->emit(kX86InstIdMov, mem, imm.truncateTo32Bits());
compiler->emit(kX86InstIdMov, mem, imm); mem.adjust(regSize);
// Hi-Part. // Hi-Part.
mem.adjust(regSize); compiler->emit(kX86InstIdMov, mem, imm.setUInt32(hi));
imm.setUInt32(hi);
compiler->emit(kX86InstIdMov, mem, imm);
} }
else { else {
compiler->emit(kX86InstIdMov, mem, imm); compiler->emit(kX86InstIdMov, mem, imm);
@@ -1324,33 +1319,27 @@ _Move64:
if (regSize == 4) { if (regSize == 4) {
uint32_t hi = imm.getUInt32Hi(); uint32_t hi = imm.getUInt32Hi();
// Lo-Part. // Lo part.
imm.truncateTo32Bits(); compiler->emit(kX86InstIdMov, mem, imm.truncateTo32Bits());
compiler->emit(kX86InstIdMov, mem, imm);
// Hi-Part.
mem.adjust(regSize); mem.adjust(regSize);
imm.setUInt32(hi);
compiler->emit(kX86InstIdMov, mem, imm);
// Zero part - performing AND should generate shorter code, because // Hi part.
// 8-bit immediate can be used instead of 32-bit immediate required compiler->emit(kX86InstIdMov, mem, imm.setUInt32(hi));
// by MOV instruction.
mem.adjust(regSize); mem.adjust(regSize);
imm.setUInt32(0);
compiler->emit(kX86InstIdAnd, mem, imm);
mem.adjust(regSize);
compiler->emit(kX86InstIdAnd, mem, imm);
}
else {
// Lo-Hi parts.
compiler->emit(kX86InstIdMov, mem, imm);
// Zero part. // Zero part.
compiler->emit(kX86InstIdMov, mem, imm.setUInt32(0));
mem.adjust(regSize); mem.adjust(regSize);
imm.setUInt32(0);
compiler->emit(kX86InstIdAnd, mem, imm); compiler->emit(kX86InstIdMov, mem, imm);
}
else {
// Lo/Hi parts.
compiler->emit(kX86InstIdMov, mem, imm);
mem.adjust(regSize);
// Zero part.
compiler->emit(kX86InstIdMov, mem, imm.setUInt32(0));
} }
break; break;
@@ -5345,8 +5334,7 @@ _NextGroup:
goto _Advance; goto _Advance;
} }
// Remove informative nodes if we are in a middle of instruction // Remove informative nodes if we are in a middle of instruction stream.
// stream.
// //
// TODO: Shouldn't be there an option for this? Maybe it can be useful // TODO: Shouldn't be there an option for this? Maybe it can be useful
// to stop if there is a comment or something. I'm not sure if it's // to stop if there is a comment or something. I'm not sure if it's
@@ -5422,7 +5410,7 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler*
// Create labels on Assembler side. // Create labels on Assembler side.
ASMJIT_PROPAGATE_ERROR( ASMJIT_PROPAGATE_ERROR(
assembler->_registerIndexedLabels(self->getCompiler()->_targets.getLength())); assembler->_registerIndexedLabels(self->getCompiler()->_targetList.getLength()));
do { do {
#if !defined(ASMJIT_DISABLE_LOGGER) #if !defined(ASMJIT_DISABLE_LOGGER)
@@ -5506,6 +5494,8 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler*
case kNodeTypeTarget: { case kNodeTypeTarget: {
TargetNode* node = static_cast<TargetNode*>(node_); TargetNode* node = static_cast<TargetNode*>(node_);
node->setOffset(assembler->getOffset());
assembler->bind(node->getLabel()); assembler->bind(node->getLabel());
break; break;
} }
@@ -5517,7 +5507,7 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler*
uint32_t opCount = node->getOpCount(); uint32_t opCount = node->getOpCount();
const Operand* opList = node->getOpList(); const Operand* opList = node->getOpList();
assembler->_options = node->getOptions(); assembler->_instOptions = node->getOptions();
const Operand* o0 = &noOperand; const Operand* o0 = &noOperand;
const Operand* o1 = &noOperand; const Operand* o1 = &noOperand;

View File

@@ -1701,15 +1701,15 @@ ASMJIT_ENUM(kX86EFlags) {
//! Overflow flag (OF). //! Overflow flag (OF).
//! //!
//! Set if the integer result is too large a positive number or too small a //! Set if the integer result is too large a positive number or too small a
//! negative number (excluding the sign-bit) to fit in the destination //! negative number (excluding the sign-bit) to fit in the destination
//! operand; cleared otherwise. This flag indicates an overflow condition for //! operand; cleared otherwise. This flag indicates an overflow condition for
//! signed-integer arithmetic. //! signed-integer arithmetic.
kX86EFlagO = 0x01, kX86EFlagO = 0x01,
//! Sign flag (SF). //! Sign flag (SF).
//! //!
//! Set equal to the most-significant bit of the result, which is the sign //! Set equal to the most-significant bit of the result, which is the sign
//! bit of a signed integer (0 == positive value, 1 == negative value). //! bit of a signed integer (0 == positive value, 1 == negative value).
kX86EFlagS = 0x02, kX86EFlagS = 0x02,
@@ -1720,26 +1720,26 @@ ASMJIT_ENUM(kX86EFlags) {
//! Adjust flag (AF). //! Adjust flag (AF).
//! //!
//! Set if an arithmetic operation generates a carry or a borrow out of bit //! Set if an arithmetic operation generates a carry or a borrow out of bit
//! 3 of the result; cleared otherwise. This flag is used in binary-coded //! 3 of the result; cleared otherwise. This flag is used in binary-coded
//! decimal (BCD) arithmetic. //! decimal (BCD) arithmetic.
kX86EFlagA = 0x08, kX86EFlagA = 0x08,
//! Parity flag (PF). //! Parity flag (PF).
//! //!
//! Set if the least-significant byte of the result contains an even number //! Set if the least-significant byte of the result contains an even number
//! of 1 bits; cleared otherwise. //! of 1 bits; cleared otherwise.
kX86EFlagP = 0x10, kX86EFlagP = 0x10,
//! Carry flag (CF). //! Carry flag (CF).
//! //!
//! Set if an arithmetic operation generates a carry or a borrow out of the //! Set if an arithmetic operation generates a carry or a borrow out of the
//! mostsignificant bit of the result; cleared otherwise. //! mostsignificant bit of the result; cleared otherwise.
kX86EFlagC = 0x20, kX86EFlagC = 0x20,
//! Direction flag (DF). //! Direction flag (DF).
//! //!
//! The direction flag controls string instructions `movs`, `cmps`, `scas, //! The direction flag controls string instructions `movs`, `cmps`, `scas,
//! `lods` and `stos`. //! `lods` and `stos`.
kX86EFlagD = 0x40, kX86EFlagD = 0x40,