- Initial support for StaticRuntime (the base address is known before the code is generated).

- Moved bind() from X86Assembler to Assembler.
- Added helpers to get the offset of labels (useful after the code is generated).
- Added support to cache and reuse an Assembler instance when only Compiler is used.
- CodeGen getFeature(), setFeature() and friends are now inlined.
- Added an offset to TargetNode, which is filled in after the code is serialized.
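A rough usage sketch of the new StaticRuntime (illustrative only; the `X86Assembler` constructor and register spellings are assumptions for this sketch, not part of the commit):

```cpp
#include <asmjit/asmjit.h>
using namespace asmjit;

// `region` must point to memory that is mapped executable and whose address
// is known up front; obtaining such memory is platform-specific and out of
// scope here.
Error generateInPlace(void* region, size_t regionSize, void** fn) {
  // The base address is known before any code is generated.
  StaticRuntime runtime(region, regionSize);

  X86Assembler a(&runtime);  // assumed constructor taking a Runtime*
  a.mov(x86::eax, 42);       // assumed register/operand spelling
  a.ret();

  // add() relocates directly to the fixed base address; it fails with
  // kErrorCodeTooLarge if the code doesn't fit into `regionSize`.
  return runtime.add(fn, &a);
}
```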
This commit is contained in:
kobalicek
2014-09-08 20:24:31 +02:00
parent d7fc62d9e9
commit 9ead0cfb4c
25 changed files with 980 additions and 604 deletions

View File

@@ -84,8 +84,6 @@ struct X86Test_AlignBase : public X86Test {
// Alloc, use and spill preserved registers.
if (_varCount) {
uint32_t gpCount = c.getRegCount().getGp();
c.comment("Var");
uint32_t varIndex = 0;
uint32_t regIndex = 0;
uint32_t regMask = 0x1;
@@ -107,12 +105,8 @@ struct X86Test_AlignBase : public X86Test {
// Do a sum of arguments to verify possible relocation when misaligned.
if (_argCount) {
uint32_t argIndex;
c.comment("Arg");
c.xor_(gpSum, gpSum);
for (argIndex = 0; argIndex < _argCount; argIndex++) {
for (uint32_t argIndex = 0; argIndex < _argCount; argIndex++) {
X86GpVar gpArg(c, kVarTypeInt32);
c.setArg(argIndex, gpArg);
@@ -121,7 +115,6 @@ struct X86Test_AlignBase : public X86Test {
}
// Check alignment of xmmVar (has to be 16).
c.comment("Ret");
c.lea(gpVar, xmmVar.m());
c.shl(gpVar.r32(), 28);

View File

@@ -43,8 +43,10 @@ Assembler::~Assembler() {
void Assembler::reset(bool releaseMemory) {
// CodeGen members.
_baseAddress = kNoBaseAddress;
_instOptions = 0;
_error = kErrorOk;
_options = 0;
_baseZone.reset(releaseMemory);
// Assembler members.
@@ -60,8 +62,8 @@ void Assembler::reset(bool releaseMemory) {
_comment = NULL;
_unusedLinks = NULL;
_labels.reset(releaseMemory);
_relocData.reset(releaseMemory);
_labelList.reset(releaseMemory);
_relocList.reset(releaseMemory);
}
// ============================================================================
@@ -131,11 +133,11 @@ Error Assembler::_reserve(size_t n) {
// ============================================================================
Error Assembler::_registerIndexedLabels(size_t index) {
size_t i = _labels.getLength();
size_t i = _labelList.getLength();
if (index < i)
return kErrorOk;
if (_labels._grow(index - i) != kErrorOk)
if (_labelList._grow(index - i) != kErrorOk)
return setError(kErrorNoHeapMemory);
LabelData data;
@@ -143,7 +145,7 @@ Error Assembler::_registerIndexedLabels(size_t index) {
data.links = NULL;
do {
_labels.append(data);
_labelList.append(data);
} while (++i < index);
return kErrorOk;
@@ -152,13 +154,13 @@ Error Assembler::_registerIndexedLabels(size_t index) {
Error Assembler::_newLabel(Label* dst) {
dst->_label.op = kOperandTypeLabel;
dst->_label.size = 0;
dst->_label.id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labels.getLength()));
dst->_label.id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labelList.getLength()));
LabelData data;
data.offset = -1;
data.links = NULL;
if (_labels.append(data) != kErrorOk)
if (_labelList.append(data) != kErrorOk)
goto _NoMemory;
return kErrorOk;
@@ -187,6 +189,80 @@ LabelLink* Assembler::_newLabelLink() {
return link;
}
Error Assembler::bind(const Label& label) {
// Get label data based on label id.
uint32_t index = label.getId();
LabelData* data = getLabelData(index);
// Label can be bound only once.
if (data->offset != -1)
return setError(kErrorLabelAlreadyBound);
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logFormat(kLoggerStyleLabel, "L%u:\n", index);
#endif // !ASMJIT_DISABLE_LOGGER
Error error = kErrorOk;
size_t pos = getOffset();
LabelLink* link = data->links;
LabelLink* prev = NULL;
while (link) {
intptr_t offset = link->offset;
if (link->relocId != -1) {
// Handle RelocData - We have to update RelocData information instead of
// patching the displacement in LabelData.
_relocList[link->relocId].data += static_cast<Ptr>(pos);
}
else {
// Not using relocId, this means that we are overwriting a real
// displacement in the binary stream.
int32_t patchedValue = static_cast<int32_t>(
static_cast<intptr_t>(pos) - offset + link->displacement);
// Size of the value we are going to patch. Only BYTE/DWORD is allowed.
uint32_t size = getByteAt(offset);
ASMJIT_ASSERT(size == 1 || size == 4);
if (size == 4) {
setInt32At(offset, patchedValue);
}
else {
ASMJIT_ASSERT(size == 1);
if (IntUtil::isInt8(patchedValue))
setByteAt(offset, static_cast<uint8_t>(patchedValue & 0xFF));
else
error = kErrorIllegalDisplacement;
}
}
prev = link->prev;
link = prev;
}
// Chain unused links.
link = data->links;
if (link) {
if (prev == NULL)
prev = link;
prev->prev = _unusedLinks;
_unusedLinks = link;
}
// Set as bound (offset is zero or greater and no links).
data->offset = pos;
data->links = NULL;
if (error != kErrorOk)
return setError(error);
return error;
}
// ============================================================================
// [asmjit::Assembler - Embed]
// ============================================================================
@@ -210,6 +286,19 @@ Error Assembler::embed(const void* data, uint32_t size) {
return kErrorOk;
}
// ============================================================================
// [asmjit::Assembler - Reloc]
// ============================================================================
size_t Assembler::relocCode(void* dst, Ptr baseAddress) const {
if (baseAddress == kNoBaseAddress)
baseAddress = hasBaseAddress() ? getBaseAddress() : static_cast<Ptr>((uintptr_t)dst);
else if (getBaseAddress() != baseAddress)
return 0;
return _relocCode(dst, baseAddress);
}
// ============================================================================
// [asmjit::Assembler - Make]
// ============================================================================
@@ -232,52 +321,52 @@ void* Assembler::make() {
// [asmjit::Assembler - Emit (Helpers)]
// ============================================================================
#define no noOperand
#define NA noOperand
Error Assembler::emit(uint32_t code) {
return _emit(code, no, no, no, no);
return _emit(code, NA, NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0) {
return _emit(code, o0, no, no, no);
return _emit(code, o0, NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1) {
return _emit(code, o0, o1, no, no);
return _emit(code, o0, o1, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) {
return _emit(code, o0, o1, o2, no);
return _emit(code, o0, o1, o2, NA);
}
Error Assembler::emit(uint32_t code, int o0) {
Imm imm(o0);
return _emit(code, imm, no, no, no);
return _emit(code, imm, NA, NA, NA);
}
Error Assembler::emit(uint32_t code, uint64_t o0) {
Imm imm(o0);
return _emit(code, imm, no, no, no);
return _emit(code, imm, NA, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, int o1) {
Imm imm(o1);
return _emit(code, o0, imm, no, no);
return _emit(code, o0, imm, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, uint64_t o1) {
Imm imm(o1);
return _emit(code, o0, imm, no, no);
return _emit(code, o0, imm, NA, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2) {
Imm imm(o2);
return _emit(code, o0, o1, imm, no);
return _emit(code, o0, o1, imm, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2) {
Imm imm(o2);
return _emit(code, o0, o1, imm, no);
return _emit(code, o0, o1, imm, NA);
}
Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3) {
@@ -290,7 +379,7 @@ Error Assembler::emit(uint32_t code, const Operand& o0, const Operand& o1, const
return _emit(code, o0, o1, o2, imm);
}
#undef no
#undef NA
} // asmjit namespace
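The displacement patch computed in `bind()` above can be sanity-checked in isolation; a minimal standalone sketch of the same arithmetic (the concrete offsets are hypothetical):

```cpp
#include <cassert>
#include <cstdint>

// Mirrors bind(): a label bound at `pos` patches a link recorded at `offset`,
// with `displacement` pre-seeded when the jump was emitted.
static int32_t patchedValue(intptr_t pos, intptr_t offset, intptr_t displacement) {
  return static_cast<int32_t>(pos - offset + displacement);
}

int main() {
  // Forward short jump: patch byte recorded at offset 11, label bound at 18.
  // With a seeded displacement of -1 the encoded rel8 is 6 bytes forward.
  int32_t v = patchedValue(18, 11, -1);
  assert(v == 6);
  // bind() then checks IntUtil::isInt8(v) before writing a single byte.
  return 0;
}
```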

View File

@@ -321,31 +321,58 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
// [Label]
// --------------------------------------------------------------------------
//! Get count of labels created.
//! Get number of labels created.
ASMJIT_INLINE size_t getLabelsCount() const {
return _labels.getLength();
return _labelList.getLength();
}
//! Get whether `label` is created.
ASMJIT_INLINE bool isLabelCreated(const Label& label) const {
return static_cast<size_t>(label.getId()) < _labels.getLength();
//! Get whether the `label` is valid (created by the assembler).
ASMJIT_INLINE bool isLabelValid(const Label& label) const {
return isLabelValid(label.getId());
}
//! \internal
//! \overload
ASMJIT_INLINE bool isLabelValid(uint32_t id) const {
return static_cast<size_t>(id) < _labelList.getLength();
}
//! Get whether the `label` is bound.
//!
//! \note It's an error to pass a label that is not valid. If you are not
//! sure about a label's validity, check it with `isLabelValid()` before the
//! bound check; otherwise you may hit an assertion failure in debug mode and
//! undefined behavior in release mode.
ASMJIT_INLINE bool isLabelBound(const Label& label) const {
return isLabelBound(label.getId());
}
//! \overload
ASMJIT_INLINE bool isLabelBound(uint32_t id) const {
ASMJIT_ASSERT(isLabelValid(id));
return _labelList[id].offset != -1;
}
//! Get `label` offset or -1 if the label is not yet bound.
ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const {
return getLabelOffset(label.getId());
}
//! \overload
ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const {
ASMJIT_ASSERT(isLabelValid(id));
return _labelList[id].offset;
}
//! Get `LabelData` by `label`.
ASMJIT_INLINE LabelData* getLabelData(const Label& label) const {
return getLabelDataById(label.getId());
return getLabelData(label.getId());
}
//! \internal
//!
//! Get `LabelData` by `id`.
ASMJIT_INLINE LabelData* getLabelDataById(uint32_t id) const {
ASMJIT_ASSERT(id != kInvalidValue);
ASMJIT_ASSERT(id < _labels.getLength());
return const_cast<LabelData*>(&_labels[id]);
//! \overload
ASMJIT_INLINE LabelData* getLabelData(uint32_t id) const {
ASMJIT_ASSERT(isLabelValid(id));
return const_cast<LabelData*>(&_labelList[id]);
}
//! \internal
@@ -370,22 +397,17 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
return result;
}
//! Bind label to the current offset.
virtual void _bind(const Label& label) = 0;
//! Bind label to the current offset.
//!
//! \note Label can be bound only once!
ASMJIT_INLINE void bind(const Label& label) {
_bind(label);
}
ASMJIT_API virtual Error bind(const Label& label);
// --------------------------------------------------------------------------
// [Embed]
// --------------------------------------------------------------------------
//! Embed data into the code buffer.
ASMJIT_API Error embed(const void* data, uint32_t size);
ASMJIT_API virtual Error embed(const void* data, uint32_t size);
// --------------------------------------------------------------------------
// [Align]
@@ -396,57 +418,41 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
//! Typical usage of this is to align labels at start of the inner loops.
//!
//! Inserts `nop()` instructions or CPU optimized NOPs.
ASMJIT_INLINE Error align(uint32_t mode, uint32_t offset) {
return _align(mode, offset);
}
//! \internal
//!
//! Align target buffer to `m` bytes.
virtual Error _align(uint32_t mode, uint32_t offset) = 0;
virtual Error align(uint32_t mode, uint32_t offset) = 0;
// --------------------------------------------------------------------------
// [Reloc]
// --------------------------------------------------------------------------
//! Simplified version of the `relocCode()` method designed for JIT.
//! Relocate the code to `baseAddress` and copy to `dst`.
//!
//! \overload
ASMJIT_INLINE size_t relocCode(void* dst) const {
return _relocCode(dst, static_cast<Ptr>((uintptr_t)dst));
}
//! Relocate code to a given address `dst`.
//! \param dst Contains the location where the relocated code should be
//! copied. The pointer can be an address returned by a virtual memory
//! allocator or any other address that has sufficient space.
//!
//! \param dst Refers to the location where the relocated code should be
//! copied. The pointer can be an address returned by a virtual memory
//! allocator or any custom address.
//! \param base Base address used for relocation. The `JitRuntime` always
//! sets the `base` address to be the same as `dst`, but other runtimes, for
//! example `StaticRuntime`, do not have to follow this rule.
//!
//! \param base Base address used for relocation. `JitRuntime` always sets
//! `base` address to be the same as `dst`, but other runtimes do not have
//! to follow this rule.
//! \retval The number of bytes actually used. If the code generator reserved
//! space for possible trampolines but didn't use it, the number of bytes
//! used can actually be less than the expected worst case. The virtual
//! memory allocator can shrink the memory it initially allocated.
//!
//! \retval The number of bytes used. If the code generator reserved space
//! for possible trampolines, but these weren't generated, the number of
//! bytes used can actually be less than the expected worst case. The virtual
//! memory allocator can in such a case return some memory back to the pool.
//!
//! A given buffer will be overwritten, to get number of bytes required use
//! `getCodeSize()`.
ASMJIT_INLINE size_t relocCode(void* dst, Ptr base) const {
return _relocCode(dst, base);
}
//! A given buffer will be overwritten; to get the number of bytes required,
//! use `getCodeSize()`.
ASMJIT_API size_t relocCode(void* dst, Ptr baseAddress = kNoBaseAddress) const;
//! \internal
//!
//! Reloc code.
virtual size_t _relocCode(void* dst, Ptr base) const = 0;
virtual size_t _relocCode(void* dst, Ptr baseAddress) const = 0;
// --------------------------------------------------------------------------
// [Make]
// --------------------------------------------------------------------------
ASMJIT_API void* make();
ASMJIT_API virtual void* make();
// --------------------------------------------------------------------------
// [Emit]
@@ -504,16 +510,15 @@ struct ASMJIT_VCLASS Assembler : public CodeGen {
//! Size of possible trampolines.
uint32_t _trampolineSize;
//! Inline comment that will be logged by the next instruction and
//! set to NULL.
//! Inline comment that will be logged with the next instruction and then set to NULL.
const char* _comment;
//! Unused `LabelLink` structures pool.
LabelLink* _unusedLinks;
//! Labels data.
PodVector<LabelData> _labels;
//! Relocations data.
PodVector<RelocData> _relocData;
//! LabelData list.
PodVector<LabelData> _labelList;
//! RelocData list.
PodVector<RelocData> _relocList;
};
//! \}
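A short sketch of how the new label queries compose (illustrative; it assumes a concrete backend assembler such as `X86Assembler` created the label through its own `newLabel()` helper):

```cpp
#include <asmjit/asmjit.h>
#include <cassert>
using namespace asmjit;

static void labelQueries(Assembler& a, const Label& L) {
  assert(a.isLabelValid(L));          // L was created by `a`
  assert(!a.isLabelBound(L));         // not bound yet...
  assert(a.getLabelOffset(L) == -1);  // ...so its offset is still -1

  a.bind(L);                          // bind() now lives in Assembler and returns Error
  assert(a.isLabelBound(L));
  assert(a.getLabelOffset(L) == static_cast<intptr_t>(a.getOffset()));
}
```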

View File

@@ -24,11 +24,13 @@ CodeGen::CodeGen(Runtime* runtime) :
_runtime(runtime),
_logger(NULL),
_errorHandler(NULL),
_baseAddress(runtime->getBaseAddress()),
_arch(kArchNone),
_regSize(0),
_features(static_cast<uint8_t>(IntUtil::mask(kCodeGenOptimizedAlign))),
_reserved(0),
_features(IntUtil::mask(kCodeGenOptimizedAlign)),
_instOptions(0),
_error(kErrorOk),
_options(0),
_baseZone(16384 - kZoneOverhead) {}
CodeGen::~CodeGen() {
@@ -103,28 +105,6 @@ Error CodeGen::setErrorHandler(ErrorHandler* handler) {
return kErrorOk;
}
// ============================================================================
// [asmjit::CodeGen - Features]
// ============================================================================
bool CodeGen::hasFeature(uint32_t feature) const {
if (feature >= sizeof(_features) * 8)
return false;
feature = 1 << feature;
return (_features & feature) != 0;
}
Error CodeGen::setFeature(uint32_t feature, bool value) {
if (feature >= sizeof(_features) * 8)
return setError(kErrorInvalidArgument);
feature = static_cast<uint32_t>(value) << feature;
_features = static_cast<uint8_t>((static_cast<uint32_t>(_features) & ~feature) | feature);
return kErrorOk;
}
} // asmjit namespace
// [Api-End]

View File

@@ -65,7 +65,7 @@ ASMJIT_ENUM(kCodeGen) {
//! Schedule instructions so they can be executed faster (`Compiler` only).
//!
//! Default `false`, has to be explicitly enabled because it scheduler needs
//! Default `false` - has to be explicitly enabled as the scheduler needs
//! some time to run.
//!
//! X86/X64
@@ -97,9 +97,13 @@ ASMJIT_ENUM(kAlignMode) {
//! Relocation mode.
ASMJIT_ENUM(kRelocMode) {
//! Relocate an absolute address to an absolute address.
kRelocAbsToAbs = 0,
//! Relocate a relative address to an absolute address.
kRelocRelToAbs = 1,
//! Relocate an absolute address to a relative address.
kRelocAbsToRel = 2,
//! Relocate an absolute address to a relative address or use trampoline.
kRelocTrampoline = 3
};
@@ -163,7 +167,33 @@ struct ASMJIT_VCLASS CodeGen {
}
// --------------------------------------------------------------------------
// [Error]
// [BaseAddress]
// --------------------------------------------------------------------------
//! Get whether the code-generator has a base address.
//!
//! \sa \ref getBaseAddress()
ASMJIT_INLINE bool hasBaseAddress() const {
return _baseAddress != kNoBaseAddress;
}
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const {
return _baseAddress;
}
//! Set the base address to `baseAddress`.
ASMJIT_INLINE void setBaseAddress(Ptr baseAddress) {
_baseAddress = baseAddress;
}
//! Reset the base address.
ASMJIT_INLINE void resetBaseAddress() {
setBaseAddress(kNoBaseAddress);
}
// --------------------------------------------------------------------------
// [LastError / ErrorHandler]
// --------------------------------------------------------------------------
//! Get last error code.
@@ -193,35 +223,55 @@ struct ASMJIT_VCLASS CodeGen {
}
// --------------------------------------------------------------------------
// [Features]
// [Code-Generation Features]
// --------------------------------------------------------------------------
//! Get code-generator `feature`.
ASMJIT_API bool hasFeature(uint32_t feature) const;
ASMJIT_INLINE bool hasFeature(uint32_t feature) const {
ASMJIT_ASSERT(feature < 32);
return (_features & (1 << feature)) != 0;
}
//! Set code-generator `feature` to `value`.
ASMJIT_API Error setFeature(uint32_t feature, bool value);
ASMJIT_INLINE void setFeature(uint32_t feature, bool value) {
ASMJIT_ASSERT(feature < 32);
uint32_t mask = static_cast<uint32_t>(1) << feature;
_features = value ? (_features | mask) : (_features & ~mask);
}
//! Get code-generator features.
ASMJIT_INLINE uint32_t getFeatures() const {
return _features;
}
//! Set code-generator features.
ASMJIT_INLINE void setFeatures(uint32_t features) {
_features = features;
}
// --------------------------------------------------------------------------
// [Options]
// [Instruction Options]
// --------------------------------------------------------------------------
//! Get options of the next instruction.
ASMJIT_INLINE uint32_t getOptions() const {
return _options;
}
//! Set options of the next instruction.
ASMJIT_INLINE void setOptions(uint32_t options) {
_options = options;
ASMJIT_INLINE uint32_t getInstOptions() const {
return _instOptions;
}
//! Get options of the next instruction and reset them.
ASMJIT_INLINE uint32_t getOptionsAndReset() {
uint32_t options = _options;
_options = 0;
return options;
ASMJIT_INLINE uint32_t getInstOptionsAndReset() {
uint32_t instOptions = _instOptions;
_instOptions = 0;
return instOptions;
}
//! Set options of the next instruction.
ASMJIT_INLINE void setInstOptions(uint32_t instOptions) {
_instOptions = instOptions;
}
// --------------------------------------------------------------------------
// [Make]
// --------------------------------------------------------------------------
@@ -238,31 +288,39 @@ struct ASMJIT_VCLASS CodeGen {
// [Members]
// --------------------------------------------------------------------------
//! Runtime.
//! Target runtime.
Runtime* _runtime;
#if !defined(ASMJIT_DISABLE_LOGGER)
//! Logger.
Logger* _logger;
#else
// Makes the libraries built with/without logging support binary compatible.
//! \internal
//!
//! Makes libraries built with or without logging support binary compatible.
void* _logger;
#endif // ASMJIT_DISABLE_LOGGER
//! Error handler, called by \ref setError().
ErrorHandler* _errorHandler;
//! Target architecture.
uint8_t _arch;
//! Target general-purpose register size (4 or 8 bytes).
uint8_t _regSize;
//! Target features.
uint8_t _features;
//! Last error code.
uint8_t _error;
//! Base address (-1 if unknown/not used).
Ptr _baseAddress;
//! Options for the next generated instruction (only 8-bits used).
uint32_t _options;
//! Target architecture ID.
uint8_t _arch;
//! Target architecture GP register size in bytes (4 or 8).
uint8_t _regSize;
//! \internal
uint16_t _reserved;
//! Code-Generation features, used by \ref hasFeature() and \ref setFeature().
uint32_t _features;
//! Options affecting the next instruction.
uint32_t _instOptions;
//! Last error code.
uint32_t _error;
//! Base zone.
Zone _baseZone;
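The renamed instruction-option accessors are consume-once; a standalone mirror of the pattern (the struct and option value below are hypothetical, for illustration only):

```cpp
#include <cassert>
#include <cstdint>

// Mirrors the consume-once lifecycle of `_instOptions`.
struct MiniCodeGen {
  uint32_t _instOptions = 0;

  void setInstOptions(uint32_t o) { _instOptions = o; }

  uint32_t getInstOptionsAndReset() {
    uint32_t o = _instOptions;
    _instOptions = 0;  // options affect only the next instruction
    return o;
  }
};

int main() {
  MiniCodeGen cg;
  cg.setInstOptions(0x2);                      // e.g. a "lock"-style flag (hypothetical)
  assert(cg.getInstOptionsAndReset() == 0x2);  // consumed by the next emit
  assert(cg.getInstOptionsAndReset() == 0);    // and cleared afterwards
  return 0;
}
```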

View File

@@ -44,6 +44,7 @@ Compiler::Compiler(Runtime* runtime) :
_nodeFlags(0),
_maxLookAhead(kBaseCompilerDefaultLookAhead),
_targetVarMapping(NULL),
_assembler(NULL),
_firstNode(NULL),
_lastNode(NULL),
_cursor(NULL),
@@ -55,7 +56,10 @@ Compiler::Compiler(Runtime* runtime) :
_globalConstPool(&_baseZone) {}
Compiler::~Compiler() {
reset();
reset(true);
if (_assembler != NULL)
delete _assembler;
}
// ============================================================================
@@ -64,14 +68,19 @@ Compiler::~Compiler() {
void Compiler::reset(bool releaseMemory) {
// CodeGen members.
_baseAddress = kNoBaseAddress;
_instOptions = 0;
_error = kErrorOk;
_options = 0;
_baseZone.reset(releaseMemory);
// Compiler members.
_nodeFlowId = 0;
_nodeFlags = 0;
if (_assembler != NULL)
_assembler->reset(releaseMemory);
_firstNode = NULL;
_lastNode = NULL;
@@ -88,8 +97,8 @@ void Compiler::reset(bool releaseMemory) {
_stringZone.reset(releaseMemory);
_localConstZone.reset(releaseMemory);
_targets.reset(releaseMemory);
_vars.reset(releaseMemory);
_targetList.reset(releaseMemory);
_varList.reset(releaseMemory);
}
// ============================================================================
@@ -293,9 +302,9 @@ AlignNode* Compiler::addAlign(uint32_t mode, uint32_t offset) {
TargetNode* Compiler::newTarget() {
TargetNode* node = newNode<TargetNode>(
OperandUtil::makeLabelId(static_cast<uint32_t>(_targets.getLength())));
OperandUtil::makeLabelId(static_cast<uint32_t>(_targetList.getLength())));
if (node == NULL || _targets.append(node) != kErrorOk)
if (node == NULL || _targetList.append(node) != kErrorOk)
goto _NoMemory;
return node;
@@ -330,11 +339,12 @@ _NoMemory:
return setError(kErrorNoHeapMemory);
}
void Compiler::bind(const Label& label) {
Error Compiler::bind(const Label& label) {
uint32_t index = label.getId();
ASMJIT_ASSERT(index < _targets.getLength());
ASMJIT_ASSERT(index < _targetList.getLength());
addNode(_targets[index]);
addNode(_targetList[index]);
return kErrorOk;
}
// ============================================================================
@@ -460,7 +470,7 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char*
goto _NoMemory;
vd->_name = noName;
vd->_id = OperandUtil::makeVarId(static_cast<uint32_t>(_vars.getLength()));
vd->_id = OperandUtil::makeVarId(static_cast<uint32_t>(_varList.getLength()));
vd->_contextId = kInvalidValue;
if (name != NULL && name[0] != '\0') {
@@ -495,7 +505,7 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char*
vd->_va = NULL;
if (_vars.append(vd) != kErrorOk)
if (_varList.append(vd) != kErrorOk)
goto _NoMemory;
return vd;
@@ -575,6 +585,33 @@ void Compiler::rename(Var& var, const char* name) {
}
}
// ============================================================================
// [asmjit::Compiler - Assembler]
// ============================================================================
Assembler* Compiler::getAssembler() {
Assembler* a = _assembler;
if (a != NULL) {
a->reset(false);
}
else {
a = _newAssembler();
_assembler = a;
}
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = _logger;
if (logger != NULL)
a->setLogger(logger);
#endif // !ASMJIT_DISABLE_LOGGER
a->setBaseAddress(_baseAddress);
a->setFeatures(_features);
return a;
}
} // asmjit namespace
// [Api-End]
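Putting the cached assembler together with the new `serialize(Assembler*)` signature, a `make()`-style flow could look like this (a sketch, not the actual implementation):

```cpp
#include <asmjit/asmjit.h>
using namespace asmjit;

static Error buildAndAdd(Compiler& c, Runtime& runtime, void** dst) {
  Assembler* a = c.getAssembler();  // cached instance, reset on every call
  if (a == NULL)
    return kErrorNoHeapMemory;

  Error err = c.serialize(a);       // serialize() now takes Assembler*
  if (err != kErrorOk)
    return err;

  // TargetNode offsets are filled during serialization, so from this point
  // c.getLabelOffset(...) returns real offsets instead of -1.
  return runtime.add(dst, a);
}
```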

View File

@@ -2023,6 +2023,7 @@ struct TargetNode : public Node {
ASMJIT_INLINE TargetNode(Compiler* compiler, uint32_t labelId) : Node(compiler, kNodeTypeTarget) {
_id = labelId;
_numRefs = 0;
_offset = -1;
_from = NULL;
}
@@ -2058,6 +2059,14 @@ struct TargetNode : public Node {
//! Subtract number of jumps to this target.
ASMJIT_INLINE void subNumRefs(uint32_t i = 1) { _numRefs -= i; }
//! Get the label offset.
//!
//! \note Only valid after the content has been serialized to the `Assembler`.
ASMJIT_INLINE intptr_t getOffset() const { return _offset; }
//! Set the label offset.
ASMJIT_INLINE void setOffset(intptr_t offset) { _offset = offset; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
@@ -2067,6 +2076,8 @@ struct TargetNode : public Node {
//! Count of jumps here.
uint32_t _numRefs;
//! Label offset, after serialization.
intptr_t _offset;
//! First jump instruction that points to this target (label).
JumpNode* _from;
};
@@ -2454,16 +2465,15 @@ struct FuncNode : public Node {
//! Required stack alignment (usually for multimedia instructions).
uint32_t _requiredStackAlignment;
//! The "Red Zone" suze - count of bytes which might be accessed
//! without adjusting the stack pointer.
//! The "Red Zone" size - count of bytes which might be accessed without
//! adjusting the stack pointer.
uint16_t _redZoneSize;
//! Spill zone size (zone used by WIN64ABI).
uint16_t _spillZoneSize;
//! Stack size needed for function arguments.
uint32_t _argStackSize;
//! Stack size needed for all variables and memory allocated on
//! the stack.
//! Stack size needed for all variables and memory allocated on the stack.
uint32_t _memStackSize;
//! Stack size needed to call other functions.
uint32_t _callStackSize;
@@ -2683,9 +2693,14 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
// --------------------------------------------------------------------------
//! Get maximum look ahead.
ASMJIT_INLINE uint32_t getMaxLookAhead() const { return _maxLookAhead; }
ASMJIT_INLINE uint32_t getMaxLookAhead() const {
return _maxLookAhead;
}
//! Set maximum look ahead to `val`.
ASMJIT_INLINE void setMaxLookAhead(uint32_t val) { _maxLookAhead = val; }
ASMJIT_INLINE void setMaxLookAhead(uint32_t val) {
_maxLookAhead = val;
}
// --------------------------------------------------------------------------
// [Clear / Reset]
@@ -2787,9 +2802,9 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Get `TargetNode` by `id`.
ASMJIT_INLINE TargetNode* getTargetById(uint32_t id) {
ASMJIT_ASSERT(OperandUtil::isLabelId(id));
ASMJIT_ASSERT(id < _targets.getLength());
ASMJIT_ASSERT(id < _targetList.getLength());
return _targets[id];
return _targetList[id];
}
//! Get `TargetNode` by `label`.
@@ -2803,12 +2818,43 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Get count of created labels.
ASMJIT_INLINE size_t getLabelsCount() const {
return _targets.getLength();
return _targetList.getLength();
}
//! Get whether `label` is created.
ASMJIT_INLINE bool isLabelCreated(const Label& label) const {
return static_cast<size_t>(label.getId()) < _targets.getLength();
ASMJIT_INLINE bool isLabelValid(const Label& label) const {
return isLabelValid(label.getId());
}
//! \overload
ASMJIT_INLINE bool isLabelValid(uint32_t id) const {
return static_cast<size_t>(id) < _targetList.getLength();
}
//! Get `TargetNode` by `label`.
ASMJIT_INLINE TargetNode* getTargetByLabel(const Label& label) {
return getTargetByLabel(label.getId());
}
//! \overload
ASMJIT_INLINE TargetNode* getTargetByLabel(uint32_t id) {
ASMJIT_ASSERT(isLabelValid(id));
return _targetList[id];
}
//! Get `label` offset or -1 if the label is not bound.
//!
//! This method is only meaningful after the code has been serialized to the
//! `Assembler`; before that, the returned offset will be -1 (even if the
//! label has been bound).
ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const {
return getLabelOffset(label.getId());
}
//! \overload
ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const {
ASMJIT_ASSERT(isLabelValid(id));
return _targetList[id]->getOffset();
}
//! \internal
@@ -2826,7 +2872,7 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Bind label to the current offset.
//!
//! \note Label can be bound only once!
ASMJIT_API void bind(const Label& label);
ASMJIT_API Error bind(const Label& label);
// --------------------------------------------------------------------------
// [Embed]
@@ -2868,8 +2914,8 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
// --------------------------------------------------------------------------
//! Get whether the variable `var` is valid (created by the compiler).
ASMJIT_INLINE bool isVarCreated(const Var& var) const {
return static_cast<size_t>(var.getId() & kOperandIdNum) < _vars.getLength();
ASMJIT_INLINE bool isVarValid(const Var& var) const {
return static_cast<size_t>(var.getId() & kOperandIdNum) < _varList.getLength();
}
//! \internal
@@ -2884,16 +2930,16 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Get `VarData` by `id`.
ASMJIT_INLINE VarData* getVdById(uint32_t id) const {
ASMJIT_ASSERT(id != kInvalidValue);
ASMJIT_ASSERT(static_cast<size_t>(id & kOperandIdNum) < _vars.getLength());
ASMJIT_ASSERT(static_cast<size_t>(id & kOperandIdNum) < _varList.getLength());
return _vars[id & kOperandIdNum];
return _varList[id & kOperandIdNum];
}
//! \internal
//!
//! Get an array of 'VarData*'.
ASMJIT_INLINE VarData** _getVdArray() const {
return const_cast<VarData**>(_vars.getData());
return const_cast<VarData**>(_varList.getData());
}
//! \internal
@@ -2950,17 +2996,36 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Put data to a constant-pool and get a memory reference to it.
virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0;
// --------------------------------------------------------------------------
// [Assembler]
// --------------------------------------------------------------------------
//! Get an assembler instance that is associated with the compiler.
//!
//! \note One instance of `Assembler` is shared and has the same lifetime as
//! the compiler; however, each call to `getAssembler()` resets the assembler
//! so new code can be serialized into it.
ASMJIT_API Assembler* getAssembler();
//! \internal
//!
//! Create a new `Assembler` instance associated with the compiler.
virtual Assembler* _newAssembler() = 0;
// --------------------------------------------------------------------------
// [Serialize]
// --------------------------------------------------------------------------
//! Send assembled code to `assembler`.
virtual Error serialize(Assembler& assembler) = 0;
//! Serialize the compiled code to `assembler`.
virtual Error serialize(Assembler* assembler) = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Internal assembler.
Assembler* _assembler;
//! Flow id added to each node created (used only by `Context`).
uint32_t _nodeFlowId;
//! Flags added to each node created (used only by `Context`).
@@ -2990,10 +3055,10 @@ struct ASMJIT_VCLASS Compiler : public CodeGen {
//! Local constant pool zone.
Zone _localConstZone;
//! Targets.
PodVector<TargetNode*> _targets;
//! Variables.
PodVector<VarData*> _vars;
//! TargetNode list.
PodVector<TargetNode*> _targetList;
//! VarData list.
PodVector<VarData*> _varList;
//! Local constant pool, flushed at the end of each function.
ConstPool _localConstPool;

View File

@@ -30,7 +30,7 @@
namespace asmjit {
// ============================================================================
// [asmjit::CpuInfo - DetectNumberOfCores]
// [asmjit::CpuInfo - DetectHwThreadsCount]
// ============================================================================
uint32_t CpuInfo::detectHwThreadsCount() {

View File

@@ -102,7 +102,7 @@ struct CpuInfo {
// [Statics]
// --------------------------------------------------------------------------
//! Detect number of cores (or sum of all cores of all processors).
//! Detect the number of hardware threads.
static ASMJIT_API uint32_t detectHwThreadsCount();
//! Get host cpu.

View File

@@ -44,11 +44,13 @@ static const char errorMessages[] = {
"No virtual memory\0"
"Invalid argument\0"
"Invalid state\0"
"No code generated\0"
"Code too large\0"
"Label already bound\0"
"Unknown instruction\0"
"Illegal instruction\0"
"Illegal addressing\0"
"Illegal displacement\0"
"Invalid function\0"
"Overlapped arguments\0"
"Unknown error\0"
};

View File

@@ -42,11 +42,25 @@ ASMJIT_ENUM(kError) {
//! Invalid state.
kErrorInvalidState = 4,
//! Unknown instruction. This happens only if instruction code is
//! out of bounds. Shouldn't happen.
kErrorUnknownInst = 5,
//! No code generated.
//!
//! Returned by runtime if the code-generator contains no code.
kErrorNoCodeGenerated = 5,
//! Illegal instruction (Assembler).
//! The generated code is too large to fit into the reserved memory.
//!
//! Returned by `StaticRuntime` when the generated code is too large to fit
//! into the memory already reserved for it.
kErrorCodeTooLarge = 6,
//! Label is already bound.
kErrorLabelAlreadyBound = 7,
//! Unknown instruction (an instruction ID is out of bounds or instruction
//! name is invalid).
kErrorUnknownInst = 8,
//! Illegal instruction.
//!
//! This status code can also be returned in X64 mode if AH, BH, CH or DH
//! registers have been used together with a REX prefix. The instruction
@@ -64,30 +78,25 @@ ASMJIT_ENUM(kError) {
//! ~~~
//!
//! \note In debug mode assertion is raised instead of returning an error.
kErrorIllegalInst = 6,
kErrorIllegalInst = 9,
//! Illegal (unencodable) addressing used (Assembler).
kErrorIllegalAddresing = 7,
//! Illegal (unencodable) addressing used.
kErrorIllegalAddresing = 10,
//! Illegal (unencodable) displacement used (Assembler).
//! Illegal (unencodable) displacement used.
//!
//! X86/X64
//! -------
//!
//! Short form of jump instruction has been used, but the displacement is out
//! of bounds.
kErrorIllegalDisplacement = 8,
//! Invalid function (Compiler).
//!
//! Returned if no function is defined, but `make()` has been called.
kErrorInvalidFunction = 9,
kErrorIllegalDisplacement = 11,
//! A variable has been assigned more than once to a function argument (Compiler).
kErrorOverlappedArgs = 10,
kErrorOverlappedArgs = 12,
//! Count of AsmJit status codes. Can grow in future.
kErrorCount = 11
//! Count of AsmJit error codes.
kErrorCount = 13
};
// ============================================================================

View File

@@ -19,6 +19,20 @@ namespace asmjit {
//! \addtogroup asmjit_base_general
//! \{
// ============================================================================
// [asmjit::Ptr / SignedPtr]
// ============================================================================
//! 64-bit unsigned pointer, compatible with JIT and non-JIT generators.
//!
//! This is the preferred pointer type to use with AsmJit library. It has a
//! capability to hold any pointer for any architecture making it an ideal
//! candidate for cross-platform code generation.
typedef uint64_t Ptr;
//! 64-bit signed pointer, like \ref Ptr, but made signed.
typedef int64_t SignedPtr;
// ============================================================================
// [asmjit::kGlobals]
// ============================================================================
@@ -30,6 +44,9 @@ namespace asmjit {
//! string is not known and has to be determined.
static const size_t kInvalidIndex = ~static_cast<size_t>(0);
//! Invalid base address.
static const Ptr kNoBaseAddress = static_cast<Ptr>(static_cast<SignedPtr>(-1));
//! Global constants.
ASMJIT_ENUM(kGlobals) {
//! Invalid value or operand id.
@@ -89,20 +106,6 @@ ASMJIT_ENUM(kArch) {
kArchHost64Bit = sizeof(intptr_t) >= 8
};
// ============================================================================
// [asmjit::Ptr / SignedPtr]
// ============================================================================
//! 64-bit unsigned pointer, compatible with JIT and non-JIT generators.
//!
//! This is the preferred pointer type to use with AsmJit library. It has a
//! capability to hold any pointer for any architecture making it an ideal
//! candidate for cross-platform code generation.
typedef uint64_t Ptr;
//! 64-bit signed pointer, like \ref Ptr, but made signed.
typedef int64_t SignedPtr;
//! \}
// ============================================================================
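A tiny helper sketch of the intended conversions; the round-trip through `uintptr_t` matches what the runtime code in this commit does (helper names are made up):

```cpp
#include <cstdint>

typedef uint64_t Ptr;       // as defined above
typedef int64_t SignedPtr;  // signed counterpart

static Ptr toPtr(void* p) {
  return static_cast<Ptr>(reinterpret_cast<uintptr_t>(p));
}

static void* fromPtr(Ptr p) {
  // Narrowing is safe on the host: a Ptr holding a host address fits uintptr_t.
  return reinterpret_cast<void*>(static_cast<uintptr_t>(p));
}
```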

View File

@@ -22,39 +22,53 @@ namespace asmjit {
// [asmjit::Runtime - Construction / Destruction]
// ============================================================================
Runtime::Runtime() {}
Runtime::Runtime() {
_sizeLimit = 0;
_runtimeType = kRuntimeTypeNone;
_allocType = kVMemAllocFreeable;
::memset(_reserved, 0, sizeof(_reserved));
_baseAddress = kNoBaseAddress;
}
Runtime::~Runtime() {}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// [asmjit::HostRuntime - Construction / Destruction]
// ============================================================================
JitRuntime::JitRuntime() :
_allocType(kVMemAllocFreeable) {}
HostRuntime::HostRuntime() {
_runtimeType = kRuntimeTypeJit;
}
JitRuntime::~JitRuntime() {}
HostRuntime::~HostRuntime() {}
// ============================================================================
// [asmjit::JitRuntime - Get]
// [asmjit::HostRuntime - Interface]
// ============================================================================
uint32_t JitRuntime::getStackAlignment() {
const CpuInfo* HostRuntime::getCpuInfo() {
return CpuInfo::getHost();
}
uint32_t HostRuntime::getStackAlignment() {
uint32_t alignment = sizeof(intptr_t);
#if defined(ASMJIT_HOST_X86)
// Modern Linux, APPLE and UNIX systems guarantee 16-byte stack alignment,
// but I'm not sure about all other UNIX operating systems, because 16-byte
// alignment is an addition to an older specification.
#if (defined(__linux__) || \
defined(__linux) || \
defined(__unix__) || \
defined(__FreeBSD__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__) || \
defined(__DARWIN__) || \
defined(__APPLE__) )
# if (defined(__linux__) || \
defined(__linux) || \
defined(__unix__) || \
defined(__FreeBSD__) || \
defined(__NetBSD__) || \
defined(__OpenBSD__) || \
defined(__DARWIN__) || \
defined(__APPLE__) )
alignment = 16;
#endif
# endif
#elif defined(ASMJIT_HOST_X64)
alignment = 16;
#endif
@@ -62,49 +76,7 @@ uint32_t JitRuntime::getStackAlignment() {
return alignment;
}
const CpuInfo* JitRuntime::getCpuInfo() {
return CpuInfo::getHost();
}
// ============================================================================
// [asmjit::JitRuntime - Add]
// ============================================================================
Error JitRuntime::add(void** dst, Assembler* assembler) {
// Disallow empty code generation.
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
*dst = NULL;
return kErrorInvalidFunction;
}
void* p = _memMgr.alloc(codeSize, getAllocType());
if (p == NULL) {
*dst = NULL;
return kErrorNoVirtualMemory;
}
// Relocate the code.
size_t relocSize = assembler->relocCode(p);
// Return unused memory to `VMemMgr`.
if (relocSize < codeSize)
_memMgr.shrink(p, relocSize);
// Return the code.
*dst = p;
flush(p, relocSize);
return kErrorOk;
}
Error JitRuntime::release(void* p) {
return _memMgr.release(p);
}
void JitRuntime::flush(void* p, size_t size) {
void HostRuntime::flush(void* p, size_t size) {
// Only useful on non-x86 architectures.
#if !defined(ASMJIT_HOST_X86) && !defined(ASMJIT_HOST_X64)
@@ -116,6 +88,103 @@ void JitRuntime::flush(void* p, size_t size) {
#endif // !ASMJIT_HOST_X86 && !ASMJIT_HOST_X64
}
// ============================================================================
// [asmjit::StaticRuntime - Construction / Destruction]
// ============================================================================
StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) {
_sizeLimit = sizeLimit;
_baseAddress = static_cast<Ptr>((uintptr_t)baseAddress);
}
StaticRuntime::~StaticRuntime() {}
// ============================================================================
// [asmjit::StaticRuntime - Interface]
// ============================================================================
Error StaticRuntime::add(void** dst, Assembler* assembler) {
size_t codeSize = assembler->getCodeSize();
size_t sizeLimit = _sizeLimit;
if (codeSize == 0) {
*dst = NULL;
return kErrorNoCodeGenerated;
}
if (sizeLimit != 0 && sizeLimit < codeSize) {
*dst = NULL;
return kErrorCodeTooLarge;
}
Ptr baseAddress = _baseAddress;
uint8_t* p = static_cast<uint8_t*>((void*)static_cast<uintptr_t>(baseAddress));
// Since the base address is known, the returned `relocSize` should be equal
// to `codeSize`. It's better to fail if they don't match instead of passing
// silently.
size_t relocSize = assembler->relocCode(p, baseAddress);
if (relocSize == 0 || codeSize != relocSize) {
*dst = NULL;
return kErrorInvalidState;
}
_baseAddress += codeSize;
if (sizeLimit)
_sizeLimit -= codeSize;
flush(p, codeSize);
*dst = p;
return kErrorOk;
}
Error StaticRuntime::release(void* p) {
// There is nothing to release as `StaticRuntime` doesn't manage any memory.
ASMJIT_UNUSED(p);
return kErrorOk;
}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
JitRuntime::JitRuntime() {}
JitRuntime::~JitRuntime() {}
// ============================================================================
// [asmjit::JitRuntime - Interface]
// ============================================================================
Error JitRuntime::add(void** dst, Assembler* assembler) {
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
*dst = NULL;
return kErrorNoCodeGenerated;
}
void* p = _memMgr.alloc(codeSize, getAllocType());
if (p == NULL) {
*dst = NULL;
return kErrorNoVirtualMemory;
}
// Relocate the code and release the unused memory back to `VMemMgr`.
size_t relocSize = assembler->relocCode(p);
if (relocSize < codeSize) {
_memMgr.shrink(p, relocSize);
}
flush(p, relocSize);
*dst = p;
return kErrorOk;
}
Error JitRuntime::release(void* p) {
return _memMgr.release(p);
}
} // asmjit namespace
// [Api-End]
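The `add()` contract above gives sequential placement plus two distinct failure modes; a hedged sketch (both assemblers are assumed to already contain code):

```cpp
#include <asmjit/asmjit.h>
using namespace asmjit;

static Error addTwoBlocks(Assembler* a1, Assembler* a2,
                          void* region, size_t limit) {
  StaticRuntime runtime(region, limit);

  void* f1;
  Error err = runtime.add(&f1, a1);  // kErrorNoCodeGenerated if a1 is empty,
  if (err != kErrorOk)               // kErrorCodeTooLarge if it exceeds `limit`
    return err;

  // The internal base address advanced by the first block's size, so the
  // second block is relocated to the memory right after it.
  void* f2;
  return runtime.add(&f2, a2);
}
```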

View File

@@ -27,6 +27,17 @@ struct CpuInfo;
//! \addtogroup asmjit_base_general
//! \{
// ============================================================================
// [asmjit::kRuntimeType]
// ============================================================================
ASMJIT_ENUM(kRuntimeType) {
kRuntimeTypeNone = 0,
kRuntimeTypeJit = 1,
kRuntimeTypeRemote = 2
};
// ============================================================================
// [asmjit::Runtime]
// ============================================================================
@@ -44,16 +55,37 @@ struct ASMJIT_VCLASS Runtime {
//! Destroy the `Runtime` instance.
ASMJIT_API virtual ~Runtime();
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get runtime type.
ASMJIT_INLINE uint32_t getRuntimeType() const {
return _runtimeType;
}
//! Get whether the runtime has a base address.
//!
//! \sa \ref getBaseAddress()
ASMJIT_INLINE bool hasBaseAddress() const {
return _baseAddress != kNoBaseAddress;
}
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const {
return _baseAddress;
}
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Get stack alignment of target runtime.
virtual uint32_t getStackAlignment() = 0;
//! Get CPU information.
virtual const CpuInfo* getCpuInfo() = 0;
//! Get stack alignment of target runtime.
virtual uint32_t getStackAlignment() = 0;
//! Allocate the memory needed for the code generated by `assembler` and
//! relocate it to the target location.
//!
@@ -64,6 +96,108 @@ struct ASMJIT_VCLASS Runtime {
//! Release memory allocated by `add`.
virtual Error release(void* p) = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Maximum size of the code that can be added to the runtime (0=unlimited).
size_t _sizeLimit;
//! Base address (-1 means no base address).
Ptr _baseAddress;
//! Type of the runtime.
uint8_t _runtimeType;
//! Type of the allocation.
uint8_t _allocType;
//! \internal
uint8_t _reserved[sizeof(intptr_t) - 2];
};
// ============================================================================
// [asmjit::HostRuntime]
// ============================================================================
//! Base runtime for JIT code generation.
struct ASMJIT_VCLASS HostRuntime : public Runtime {
ASMJIT_NO_COPY(HostRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `HostRuntime` instance.
ASMJIT_API HostRuntime();
//! Destroy the `HostRuntime` instance.
ASMJIT_API virtual ~HostRuntime();
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual const CpuInfo* getCpuInfo();
ASMJIT_API virtual uint32_t getStackAlignment();
//! Flush an instruction cache.
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes a flush of the processor cache.
//!
//! Flushing is basically a NOP under X86/X64, but is needed by architectures
//! that do not have a transparent instruction cache.
//!
//! This function can also be overridden to improve compatibility with tools
//! such as Valgrind, however, it's not an official part of AsmJit.
ASMJIT_API virtual void flush(void* p, size_t size);
};
// ============================================================================
// [asmjit::StaticRuntime]
// ============================================================================
//! JIT static runtime.
//!
//! A static runtime can be used to generate code into a memory location that
//! is known in advance.
struct ASMJIT_VCLASS StaticRuntime : public HostRuntime {
ASMJIT_NO_COPY(StaticRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `StaticRuntime` instance.
//!
//! The `baseAddress` parameter specifies a fixed target address, which will
//! be used as the base address for relocation, and `sizeLimit` specifies the
//! maximum size of the code that can be copied to it. If there is no limit,
//! `sizeLimit` should be zero.
ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0);
//! Destroy the `StaticRuntime` instance.
ASMJIT_API virtual ~StaticRuntime();
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const {
return _baseAddress;
}
//! Get the maximum size of the code that can be relocated to the target
//! address or zero if unlimited.
ASMJIT_INLINE size_t getSizeLimit() const {
return _sizeLimit;
}
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler);
ASMJIT_API virtual Error release(void* p);
};
// ============================================================================
@@ -71,7 +205,7 @@ struct ASMJIT_VCLASS Runtime {
// ============================================================================
//! JIT runtime.
struct ASMJIT_VCLASS JitRuntime : public Runtime {
struct ASMJIT_VCLASS JitRuntime : public HostRuntime {
ASMJIT_NO_COPY(JitRuntime)
// --------------------------------------------------------------------------
@@ -87,16 +221,6 @@ struct ASMJIT_VCLASS JitRuntime : public Runtime {
// [Accessors]
// --------------------------------------------------------------------------
// Note: These members can be ignored by all derived classes. They are here
// only to provide a default implementation. All other implementations (remote
// code patching or making dynamically loadable libraries/executables) ignore
// members accessed by these accessors.
//! Get the `VMemMgr` instance.
ASMJIT_INLINE VMemMgr* getMemMgr() const {
return const_cast<VMemMgr*>(&_memMgr);
}
//! Get the type of allocation.
ASMJIT_INLINE uint32_t getAllocType() const {
return _allocType;
@@ -107,37 +231,24 @@ struct ASMJIT_VCLASS JitRuntime : public Runtime {
_allocType = allocType;
}
//! Get the virtual memory manager.
ASMJIT_INLINE VMemMgr* getMemMgr() const {
return const_cast<VMemMgr*>(&_memMgr);
}
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual uint32_t getStackAlignment();
ASMJIT_API virtual const CpuInfo* getCpuInfo();
ASMJIT_API virtual Error add(void** dst, Assembler* assembler);
ASMJIT_API virtual Error release(void* p);
//! Flush instruction cache.
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes to flush the processor cache so it will not use the old data.
//!
//! Flushing is basically a NOP under X86/X64, but is needed by architectures
//! not having a transparent cache.
//!
//! This function can also be overridden to improve compatibility with tools
//! like Valgrind, but this is not an official part of AsmJit.
ASMJIT_API virtual void flush(void* p, size_t size);
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Virtual memory manager.
VMemMgr _memMgr;
//! Type of allocation.
uint32_t _allocType;
};
//! \}

View File

@@ -263,12 +263,13 @@
// ASMJIT_TRACE is only used by sources and private headers. It's safe to make
// it unavailable outside of AsmJit.
#if defined(ASMJIT_EXPORTS)
namespace asmjit { static inline int disabledTrace(...) { return 0; } }
# if defined(ASMJIT_TRACE)
# define ASMJIT_TSEC(_Section_) _Section_
# define ASMJIT_TLOG(...) ::printf(__VA_ARGS__)
# define ASMJIT_TLOG ::printf
# else
# define ASMJIT_TSEC(_Section_) do {} while(0)
# define ASMJIT_TLOG(...) do {} while(0)
# define ASMJIT_TLOG 0 && ::asmjit::disabledTrace
# endif // ASMJIT_TRACE
#endif // ASMJIT_EXPORTS
@@ -347,10 +348,15 @@ typedef unsigned __int64 uint64_t;
#if defined(ASMJIT_OS_WINDOWS) && !defined(ASMJIT_SUPRESS_WINDOWS_H)
# if !defined(WIN32_LEAN_AND_MEAN)
# define WIN32_LEAN_AND_MEAN
# define ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif // !WIN32_LEAN_AND_MEAN
# if !defined(NOMINMAX)
# define NOMINMAX
# define ASMJIT_UNDEF_NOMINMAX
# endif
# endif // !NOMINMAX
# include <windows.h>
@@ -359,6 +365,11 @@ typedef unsigned __int64 uint64_t;
# undef ASMJIT_UNDEF_NOMINMAX
# endif
# if defined(ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN)
# undef WIN32_LEAN_AND_MEAN
# undef ASMJIT_UNDEF_WIN32_LEAN_AND_MEAN
# endif
#endif // ASMJIT_OS_WINDOWS && !ASMJIT_SUPRESS_WINDOWS_H
// ============================================================================
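The disabled-logging idiom introduced above, shown standalone (names here are placeholders, not part of AsmJit):

```cpp
#include <cstdio>

static inline int placeholderTrace(...) { return 0; }

#if defined(PLACEHOLDER_TRACE)
# define PTRACE_LOG ::printf
#else
// `0 && f(args)` short-circuits: the arguments still have to compile,
// but they are never evaluated and the optimizer drops the expression.
# define PTRACE_LOG 0 && placeholderTrace
#endif

int main() {
  PTRACE_LOG("value = %d\n", 42);  // prints only when PLACEHOLDER_TRACE is set
  return 0;
}
```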

View File

@@ -4,6 +4,7 @@
// [License]
// Public Domain (Unlicense)
// [Dependencies - Broken]
#include "./broken.h"
// ============================================================================
@@ -125,30 +126,24 @@ static void BrokenAPI_runUnit(BrokenAPI::Unit* unit) {
static void BrokenAPI_runAll() {
BrokenAPI::Unit* unit = _brokenGlobal._unitList;
if (unit != NULL) {
size_t count = 0;
bool hasUnits = unit != NULL;
size_t count = 0;
do {
if (BrokenAPI_canRun(unit)) {
BrokenAPI_runUnit(unit);
count++;
}
unit = unit->next;
} while (unit != NULL);
if (count) {
INFO("\nSuccess:");
INFO(" All tests passed!");
}
else {
INFO("\nWarning:");
INFO(" No units matched the filter!");
while (unit != NULL) {
if (BrokenAPI_canRun(unit)) {
BrokenAPI_runUnit(unit);
count++;
}
unit = unit->next;
}
if (count) {
INFO("\nSuccess:");
INFO(" All tests passed!");
}
else {
INFO("\nWarning:");
INFO(" No units defined!");
INFO(" No units %s!", hasUnits ? "matched the filter" : "defined");
}
}
@@ -192,11 +187,13 @@ void BrokenAPI::setOutputFile(FILE* file) {
global._file = file;
}
void BrokenAPI::setContext(const char* file, int line) {
int BrokenAPI::setContext(const char* file, int line) {
BrokenGlobal& global = _brokenGlobal;
global._currentFile = file;
global._currentLine = line;
return 1;
}
int BrokenAPI::run(int argc, const char* argv[],
@@ -234,7 +231,7 @@ int BrokenAPI::run(int argc, const char* argv[],
return 0;
}
void BrokenAPI::info(const char* fmt, ...) {
int BrokenAPI::info(const char* fmt, ...) {
BrokenGlobal& global = _brokenGlobal;
FILE* dst = global.getFile();
@@ -253,9 +250,10 @@ void BrokenAPI::info(const char* fmt, ...) {
::fputs("\n", dst);
::fflush(dst);
return 1;
}
void BrokenAPI::fail(const char* fmt, va_list ap) {
int BrokenAPI::fail(const char* fmt, va_list ap) {
BrokenGlobal& global = _brokenGlobal;
FILE* dst = global.getFile();
@@ -276,4 +274,5 @@ void BrokenAPI::fail(const char* fmt, va_list ap) {
::fflush(dst);
::exit(1);
return 1;
}

View File

@@ -14,8 +14,8 @@
#include <stdlib.h>
#include <string.h>
// If using Doxygen to document a source-code hide everything. Ideally this
// can be also done by a macro, but there is no global and widely used one.
// Hide everything when using Doxygen. Ideally this could be guarded by a macro,
// but there is no global and widely used one across multiple projects.
//! \internal
//! \{
@@ -25,10 +25,10 @@
// ============================================================================
struct BrokenAPI {
//! Test entry point.
//! Entry point of a unit test defined by `UNIT` macro.
typedef void (*Entry)(void);
//! Test unit.
//! Test defined by `UNIT` macro.
struct Unit {
const char* name;
Entry entry;
@@ -48,14 +48,18 @@ struct BrokenAPI {
}
};
//! Register a new test (called automatically by `AutoUnit` and `UNIT`).
//! Register a new unit test (called automatically by `AutoUnit` and `UNIT`).
static void add(Unit* unit);
//! Set output file to `file`.
//! Set output file to a `file`.
static void setOutputFile(FILE* file);
//! Set the current context.
static void setContext(const char* file, int line);
//! Set the current context to `file` and `line`.
//!
//! This is called by the `EXPECT` macro to record the correct `file` and
//! `line`, because the macro internally calls the `expect()` function, whose
//! reported location would otherwise point to the uninteresting `broken.h`.
static int setContext(const char* file, int line);
//! Initialize `Broken` framework.
//!
@@ -64,29 +68,30 @@ struct BrokenAPI {
Entry onBeforeRun = (Entry)NULL,
Entry onAfterRun = (Entry)NULL);
//!
//! Used internally by `EXPECT` macro.
template<typename T>
static void expect(const T& exp, const char* fmt = NULL, ...) {
static int expect(const T& exp, const char* fmt = NULL, ...) {
if (exp)
return;
return 1;
va_list ap;
va_start(ap, fmt);
fail(fmt, ap);
va_end(ap);
return 0;
}
//! Log a message, automatically adding a new line if not present.
static void info(const char* fmt, ...);
static int info(const char* fmt, ...);
//! Called on `EXPECT()` failure.
static void fail(const char* fmt, va_list ap);
static int fail(const char* fmt, va_list ap);
};
// ============================================================================
// [Broken - Macros]
// ============================================================================
//! Define a unit.
//! Define a unit test.
//!
//! `_Name_` can only contain ASCII characters, numbers and underscore. It has
//! the same rules as identifiers in C and C++.
@@ -98,16 +103,15 @@ struct BrokenAPI {
\
static void unit_##_Name_##_entry(void)
//! Informative message printed to stdout.
#define INFO(...) \
::BrokenAPI::info(__VA_ARGS__)
//! #define INFO(...)
//!
//! Informative message printed to `stdout`.
#define INFO ::BrokenAPI::setContext(__FILE__, __LINE__) && ::BrokenAPI::info
//! Expect `_Exp_` to be truthy, fail otherwise.
#define EXPECT(...) \
do { \
::BrokenAPI::setContext(__FILE__, __LINE__); \
::BrokenAPI::expect(__VA_ARGS__); \
} while(0)
//! #define EXPECT(_Exp_ [, _Format_ [, ...]])
//!
//! Expect `_Exp_` to be true or to evaluate to true, fail otherwise.
#define EXPECT ::BrokenAPI::setContext(__FILE__, __LINE__) && ::BrokenAPI::expect
//! \}
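// A minimal usage sketch (hypothetical test, not part of this patch). The
// `setContext(...) && ...` expansion relies on the assumption that
// `setContext()` returns a non-zero int, so the right-hand side of `&&` is
// always evaluated:
//
//   UNIT(broken_example) {
//     INFO("Running broken_example");
//     EXPECT(1 + 1 == 2, "Basic arithmetic should hold");
//   }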

View File

@@ -105,6 +105,10 @@ static const uint8_t x86SegmentPrefix[8] = { 0x00, 0x26, 0x2E, 0x36, 0x3E, 0x64,
static const uint8_t x86OpCodePushSeg[8] = { 0x00, 0x06, 0x0E, 0x16, 0x1E, 0xA0, 0xA8 };
static const uint8_t x86OpCodePopSeg[8] = { 0x00, 0x07, 0x00, 0x17, 0x1F, 0xA1, 0xA9 };
// ============================================================================
// [Utils]
// ============================================================================
//! Encode MODR/M.
static ASMJIT_INLINE uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) {
return (m << 6) + (o << 3) + rm;
@@ -115,6 +119,13 @@ static ASMJIT_INLINE uint32_t x86EncodeSib(uint32_t s, uint32_t i, uint32_t b) {
return (s << 6) + (i << 3) + b;
}
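// For example (illustrative only), x86EncodeMod(0, 2, 5) == 0x15 is the
// ModR/M byte of `call [rip+disp32]`, which the trampoline patching below
// relies on; x86EncodeSib(0, 4, 4) == 0x24 encodes plain [rsp] addressing.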
//! Get whether the distance between the two pointers `a` and `b` can be
//! encoded as a relative displacement that fits into a signed 32-bit integer.
static ASMJIT_INLINE bool x64IsRelative(Ptr a, Ptr b) {
SignedPtr diff = static_cast<SignedPtr>(a) - static_cast<SignedPtr>(b);
return IntUtil::isInt32(diff);
}
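// For example (sketch): a = 0x00007FFFFFFF0000 and b = 0x0000000000001000
// give a difference of 0x00007FFFFFFEF000, which doesn't fit into int32_t,
// so the target can't be reached with a rel32 and a trampoline is required.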
// ============================================================================
// [Macros]
// ============================================================================
@@ -291,76 +302,6 @@ Error X86Assembler::setArch(uint32_t arch) {
return kErrorInvalidArgument;
}
// ============================================================================
// [asmjit::X86Assembler - Label]
// ============================================================================
void X86Assembler::_bind(const Label& label) {
// Get label data based on label id.
uint32_t index = label.getId();
LabelData* data = getLabelDataById(index);
// Label can be bound only once.
ASMJIT_ASSERT(data->offset == -1);
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logFormat(kLoggerStyleLabel, "L%u:\n", index);
#endif // !ASMJIT_DISABLE_LOGGER
size_t pos = getOffset();
LabelLink* link = data->links;
LabelLink* prev = NULL;
while (link) {
intptr_t offset = link->offset;
if (link->relocId != -1) {
// If linked label points to RelocData then instead of writing relative
// displacement to assembler stream, we will write it to RelocData.
_relocData[link->relocId].data += static_cast<Ptr>(pos);
}
else {
// Not using relocId; this means we are overwriting a real displacement
// in the assembler stream.
int32_t patchedValue = static_cast<int32_t>(
static_cast<intptr_t>(pos) - offset + link->displacement);
// Size of the value we are going to patch. Only BYTE/DWORD is allowed.
uint32_t size = getByteAt(offset);
ASMJIT_ASSERT(size == 1 || size == 4);
if (size == 4) {
setInt32At(offset, patchedValue);
}
else { // if (size) == 1
if (IntUtil::isInt8(patchedValue))
setByteAt(offset, static_cast<uint8_t>(patchedValue & 0xFF));
else
setError(kErrorIllegalDisplacement);
}
}
prev = link->prev;
link = prev;
}
// Chain unused links.
link = data->links;
if (link) {
if (prev == NULL)
prev = link;
prev->prev = _unusedLinks;
_unusedLinks = link;
}
// Unlink label if it was linked.
data->offset = pos;
data->links = NULL;
}
// ============================================================================
// [asmjit::X86Assembler - Embed]
// ============================================================================
@@ -374,7 +315,7 @@ Error X86Assembler::embedLabel(const Label& op) {
uint8_t* cursor = getCursor();
LabelData* label = getLabelDataById(op.getId());
LabelData* label = getLabelData(op.getId());
RelocData reloc;
#if !defined(ASMJIT_DISABLE_LOGGER)
@@ -398,12 +339,12 @@ Error X86Assembler::embedLabel(const Label& op) {
link->prev = (LabelLink*)label->links;
link->offset = getOffset();
link->displacement = 0;
link->relocId = _relocData.getLength();
link->relocId = _relocList.getLength();
label->links = link;
}
if (_relocData.append(reloc) != kErrorOk)
if (_relocList.append(reloc) != kErrorOk)
return setError(kErrorNoHeapMemory);
// Emit dummy intptr_t (4 or 8 bytes; depends on the address size).
@@ -420,7 +361,7 @@ Error X86Assembler::embedLabel(const Label& op) {
// [asmjit::X86Assembler - Align]
// ============================================================================
Error X86Assembler::_align(uint32_t mode, uint32_t offset) {
Error X86Assembler::align(uint32_t mode, uint32_t offset) {
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logFormat(kLoggerStyleDirective,
@@ -443,7 +384,7 @@ Error X86Assembler::_align(uint32_t mode, uint32_t offset) {
if (mode == kAlignCode) {
alignPattern = 0x90;
if (IntUtil::hasBit(_features, kCodeGenOptimizedAlign)) {
if (hasFeature(kCodeGenOptimizedAlign)) {
const X86CpuInfo* cpuInfo = static_cast<const X86CpuInfo*>(getRuntime()->getCpuInfo());
// NOPs optimized for Intel:
@@ -534,7 +475,7 @@ Error X86Assembler::_align(uint32_t mode, uint32_t offset) {
// [asmjit::X86Assembler - Reloc]
// ============================================================================
size_t X86Assembler::_relocCode(void* _dst, Ptr base) const {
size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const {
uint32_t arch = getArch();
uint8_t* dst = static_cast<uint8_t*>(_dst);
@@ -542,48 +483,49 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr base) const {
Logger* logger = getLogger();
#endif // ASMJIT_DISABLE_LOGGER
size_t codeOffset = getOffset();
size_t codeSize = getCodeSize();
size_t minCodeSize = getOffset(); // Current offset is the minimum code size.
size_t maxCodeSize = getCodeSize(); // Includes all possible trampolines.
// We will copy the exact size of the generated code. Extra code for trampolines
// is generated on-the-fly by the relocator (it doesn't exist in the source buffer).
::memcpy(dst, _buffer, codeOffset);
::memcpy(dst, _buffer, minCodeSize);
// Trampoline pointer.
uint8_t* tramp = dst + codeOffset;
uint8_t* tramp = dst + minCodeSize;
// Relocate all recorded locations.
size_t relocIndex;
size_t relocCount = _relocData.getLength();
const RelocData* relocData = _relocData.getData();
size_t relocCount = _relocList.getLength();
const RelocData* relocData = _relocList.getData();
for (relocIndex = 0; relocIndex < relocCount; relocIndex++) {
const RelocData& r = relocData[relocIndex];
Ptr ptr;
for (size_t i = 0; i < relocCount; i++) {
const RelocData& r = relocData[i];
// Make sure that the `RelocData` is correct.
Ptr ptr = r.data;
size_t offset = static_cast<size_t>(r.from);
ASMJIT_ASSERT(offset + r.size <= static_cast<Ptr>(maxCodeSize));
// Whether to use a trampoline; only possible if the relocation type is
// kRelocTrampoline on 64-bit.
bool useTrampoline = false;
// Be sure that reloc data structure is correct.
size_t offset = static_cast<size_t>(r.from);
ASMJIT_ASSERT(offset + r.size <= static_cast<Ptr>(codeSize));
switch (r.type) {
case kRelocAbsToAbs:
ptr = r.data;
break;
case kRelocRelToAbs:
ptr = r.data + base;
ptr += baseAddress;
break;
case kRelocAbsToRel:
case kRelocTrampoline:
ptr = r.data - (base + r.from + 4);
ptr -= baseAddress + r.from + 4;
break;
if (arch == kArchX64 && r.type == kRelocTrampoline && !IntUtil::isInt32(ptr)) {
ptr = (Ptr)tramp - (base + r.from + 4);
case kRelocTrampoline:
ptr -= baseAddress + r.from + 4;
if (!IntUtil::isInt32(static_cast<SignedPtr>(ptr))) {
ptr = (Ptr)tramp - (baseAddress + r.from + 4);
useTrampoline = true;
}
break;
@@ -593,26 +535,30 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr base) const {
}
switch (r.size) {
case 4: *reinterpret_cast<int32_t*>(dst + offset) = static_cast<int32_t>(ptr); break;
case 8: *reinterpret_cast<int64_t*>(dst + offset) = static_cast<int64_t>(ptr); break;
case 8:
*reinterpret_cast<int64_t*>(dst + offset) = static_cast<int64_t>(ptr);
break;
case 4:
*reinterpret_cast<int32_t*>(dst + offset) = static_cast<int32_t>(static_cast<SignedPtr>(ptr));
break;
default:
ASMJIT_ASSERT(!"Reached");
}
// Patch `jmp/call` to use trampoline.
if (arch == kArchX64 && useTrampoline) {
// Handle the case where the trampoline has been used.
if (useTrampoline) {
// Bytes that replace [REX, OPCODE] bytes.
uint32_t byte0 = 0xFF;
uint32_t byte1 = dst[offset - 1];
if (byte1 == 0xE8) {
// Call, path to FF/2 (-> 0x15).
// Call, patch to FF/2 (-> 0x15).
if (byte1 == 0xE8)
byte1 = x86EncodeMod(0, 2, 5);
}
else if (byte1 == 0xE9) {
// Jmp, path to FF/4 (-> 0x25).
// Jmp, patch to FF/4 (-> 0x25).
else if (byte1 == 0xE9)
byte1 = x86EncodeMod(0, 4, 5);
}
// Patch `jmp/call` instruction.
ASMJIT_ASSERT(offset >= 2);
@@ -635,7 +581,7 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr base) const {
if (arch == kArchX64)
return (size_t)(tramp - dst);
else
return (size_t)(codeOffset);
return (size_t)(minCodeSize);
}
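// A caller-side sketch of the contract above (illustrative; `allocExec` is a
// hypothetical executable-memory allocator and `relocCode` is assumed to be
// the public wrapper around `_relocCode`):
//
//   size_t maxSize = a.getCodeSize();   // Upper bound, incl. trampolines.
//   void* p = allocExec(maxSize);
//   size_t used = a.relocCode(p);       // Bytes actually written (<= maxSize).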
// ============================================================================
@@ -993,7 +939,7 @@ static Error ASMJIT_CDECL X86Assembler_emit(Assembler* self_, uint32_t code, con
uint8_t* cursor = self->getCursor();
uint32_t encoded = o0->getOp() + (o1->getOp() << 3) + (o2->getOp() << 6);
uint32_t options = self->getOptionsAndReset();
uint32_t options = self->getInstOptionsAndReset();
// Invalid instruction.
if (code >= _kX86InstIdCount) {
@@ -1355,11 +1301,11 @@ _Prepare:
if (encoded == ENC_OPS(Imm, None, None)) {
imVal = static_cast<const Imm*>(o0)->getInt64();
goto _EmitJmpOrCallImm;
goto _EmitJmpOrCallAbs;
}
if (encoded == ENC_OPS(Label, None, None)) {
label = self->getLabelDataById(static_cast<const Label*>(o0)->getId());
label = self->getLabelData(static_cast<const Label*>(o0)->getId());
if (label->offset != -1) {
// Bound label.
static const intptr_t kRel32Size = 5;
@@ -1529,9 +1475,9 @@ _Prepare:
case kX86InstGroupX86Jcc:
if (encoded == ENC_OPS(Label, None, None)) {
label = self->getLabelDataById(static_cast<const Label*>(o0)->getId());
label = self->getLabelData(static_cast<const Label*>(o0)->getId());
if (IntUtil::hasBit(self->_features, kCodeGenPredictedJumps)) {
if (self->hasFeature(kCodeGenPredictedJumps)) {
if (options & kInstOptionTaken)
EMIT_BYTE(0x3E);
if (options & kInstOptionNotTaken)
@@ -1593,7 +1539,7 @@ _Prepare:
}
EMIT_BYTE(0xE3);
label = self->getLabelDataById(static_cast<const Label*>(o1)->getId());
label = self->getLabelData(static_cast<const Label*>(o1)->getId());
if (label->offset != -1) {
// Bound label.
@@ -1630,11 +1576,11 @@ _Prepare:
if (encoded == ENC_OPS(Imm, None, None)) {
imVal = static_cast<const Imm*>(o0)->getInt64();
goto _EmitJmpOrCallImm;
goto _EmitJmpOrCallAbs;
}
if (encoded == ENC_OPS(Label, None, None)) {
label = self->getLabelDataById(static_cast<const Label*>(o0)->getId());
label = self->getLabelData(static_cast<const Label*>(o0)->getId());
if (label->offset != -1) {
// Bound label.
const intptr_t kRel8Size = 2;
@@ -3706,8 +3652,8 @@ _EmitSib:
if (rmMem->getMemType() == kMemTypeLabel) {
// Relative->Absolute [x86 mode].
label = self->getLabelDataById(rmMem->_vmem.base);
relocId = self->_relocData.getLength();
label = self->getLabelData(rmMem->_vmem.base);
relocId = self->_relocList.getLength();
RelocData reloc;
reloc.type = kRelocRelToAbs;
@@ -3715,7 +3661,7 @@ _EmitSib:
reloc.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer));
reloc.data = static_cast<SignedPtr>(dispOffset);
if (self->_relocData.append(reloc) != kErrorOk)
if (self->_relocList.append(reloc) != kErrorOk)
return self->setError(kErrorNoHeapMemory);
if (label->offset != -1) {
@@ -3738,7 +3684,7 @@ _EmitSib:
else /* if (Arch === kArchX64) */ {
if (rmMem->getMemType() == kMemTypeLabel) {
// [RIP + Disp32].
label = self->getLabelDataById(rmMem->_vmem.base);
label = self->getLabelData(rmMem->_vmem.base);
// Indexing is invalid.
if (mIndex < kInvalidReg)
@@ -3976,8 +3922,8 @@ _EmitAvxV:
goto _IllegalAddr;
// Relative->Absolute [x86 mode].
label = self->getLabelDataById(rmMem->_vmem.base);
relocId = self->_relocData.getLength();
label = self->getLabelData(rmMem->_vmem.base);
relocId = self->_relocList.getLength();
RelocData reloc;
reloc.type = kRelocRelToAbs;
@@ -3985,7 +3931,7 @@ _EmitAvxV:
reloc.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer));
reloc.data = static_cast<SignedPtr>(dispOffset);
if (self->_relocData.append(reloc) != kErrorOk)
if (self->_relocList.append(reloc) != kErrorOk)
return self->setError(kErrorNoHeapMemory);
if (label->offset != -1) {
@@ -4090,43 +4036,55 @@ _EmitXopM:
// --------------------------------------------------------------------------
// 64-bit mode requires a trampoline if a relative displacement doesn't fit
// into 32-bit integer. Old version of AsmJit used to emit jump to a section
// into a signed 32-bit integer. Older versions of AsmJit emitted a jump to a section
// which contained another jump followed by an address (it worked well for
// both `jmp` and `call`), but it required reserving 14 bytes for a possible
// trampoline.
//
// Instead of using 5-byte `jmp/call` and reserving 14 bytes required by the
// trampoline, it's better to use a 6-byte `jmp/call` (prefixing it with a REX
// prefix) and to patch the `jmp/call` instruction itself.
_EmitJmpOrCallImm:
// prefix) and to patch the `jmp/call` instruction to read the address from
// memory in case the trampoline is needed.
//
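// A byte-level sketch of the patching described above (illustrative only):
//
//   E8 <rel32>        call rel32          5 bytes, plain relative form.
//   40 E8 <rel32>     rex call rel32      6 bytes, as emitted below; the REX
//                                         prefix is a no-op here.
//   FF 15 <disp32>    call [rip+disp32]   Patched by `_relocCode()`; disp32
//                                         then points to the 8-byte trampoline
//                                         holding the absolute target address.
//
// A `jmp` is patched the same way to FF /4 (`jmp [rip+disp32]`).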
_EmitJmpOrCallAbs:
{
// Emit REX prefix (64-bit).
//
// Does nothing, but allows patching the instruction in case a trampoline is
// needed.
if (Arch == kArchX64) {
EMIT_OP(0x40);
}
// Both `jmp` and `call` instructions have a single-byte opcode.
EMIT_OP(opCode);
RelocData rd;
rd.type = kRelocTrampoline;
rd.type = kRelocAbsToRel;
rd.size = 4;
rd.from = (intptr_t)(cursor - self->_buffer);
rd.from = (intptr_t)(cursor - self->_buffer) + 1;
rd.data = static_cast<SignedPtr>(imVal);
if (self->_relocData.append(rd) != kErrorOk)
return self->setError(kErrorNoHeapMemory);
uint32_t trampolineSize = 0;
// Emit dummy 32-bit integer; will be overwritten by `relocCode()`.
if (Arch == kArchX64) {
Ptr baseAddress = self->getBaseAddress();
Ptr diff = rd.data - (baseAddress + rd.from + 4);
// If the base address of the output is known, it's possible to determine
// the need for a trampoline here. This saves a possible REX prefix in
// 64-bit mode and avoids reserving space for the absolute address.
if (baseAddress == kNoBaseAddress || !x64IsRelative(rd.data, baseAddress + rd.from + 4)) {
// Emit a REX prefix so the instruction can be patched later on. The REX
// prefix does nothing if not patched afterwards, but makes it possible to
// patch the instruction in case the trampoline is needed.
rd.type = kRelocTrampoline;
rd.from++;
EMIT_OP(0x40);
trampolineSize = 8;
}
}
// Both `jmp` and `call` instructions have a single-byte opcode and are
// followed by a 32-bit displacement.
EMIT_OP(opCode);
EMIT_DWORD(0);
// Trampoline has to be reserved, even if it's not used.
if (Arch == kArchX64) {
self->_trampolineSize += 8;
}
if (self->_relocList.append(rd) != kErrorOk)
return self->setError(kErrorNoHeapMemory);
// Reserve space for a possible trampoline.
self->_trampolineSize += trampolineSize;
}
goto _EmitDone;

View File

@@ -29,43 +29,43 @@ namespace asmjit {
#define ASMJIT_X86_EMIT_OPTIONS(_Class_) \
/*! Force short form of jmp/jcc instruction. */ \
ASMJIT_INLINE _Class_& short_() { \
_options |= kInstOptionShortForm; \
_instOptions |= kInstOptionShortForm; \
return *this; \
} \
\
/*! Force long form of jmp/jcc instruction. */ \
ASMJIT_INLINE _Class_& long_() { \
_options |= kInstOptionLongForm; \
_instOptions |= kInstOptionLongForm; \
return *this; \
} \
\
/*! Condition is likely to be taken (has only benefit on P4). */ \
ASMJIT_INLINE _Class_& taken() { \
_options |= kInstOptionTaken; \
_instOptions |= kInstOptionTaken; \
return *this; \
} \
\
/*! Condition is unlikely to be taken (has only benefit on P4). */ \
ASMJIT_INLINE _Class_& notTaken() { \
_options |= kInstOptionNotTaken; \
_instOptions |= kInstOptionNotTaken; \
return *this; \
} \
\
/*! Use LOCK prefix. */ \
ASMJIT_INLINE _Class_& lock() { \
_options |= kX86InstOptionLock; \
_instOptions |= kX86InstOptionLock; \
return *this; \
} \
\
/*! Force REX prefix. */ \
ASMJIT_INLINE _Class_& rex() { \
_options |= kX86InstOptionRex; \
_instOptions |= kX86InstOptionRex; \
return *this; \
} \
\
/*! Force 3-byte VEX prefix. */ \
ASMJIT_INLINE _Class_& vex3() { \
_options |= kX86InstOptionVex3; \
_instOptions |= kX86InstOptionVex3; \
return *this; \
}
@@ -377,12 +377,6 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler {
ASMJIT_API Error setArch(uint32_t arch);
// --------------------------------------------------------------------------
// [Label]
// --------------------------------------------------------------------------
ASMJIT_API virtual void _bind(const Label& label);
// --------------------------------------------------------------------------
// [Embed]
// --------------------------------------------------------------------------
@@ -439,13 +433,13 @@ struct ASMJIT_VCLASS X86Assembler : public Assembler {
// [Align]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error _align(uint32_t mode, uint32_t offset);
ASMJIT_API virtual Error align(uint32_t mode, uint32_t offset);
// --------------------------------------------------------------------------
// [Reloc]
// --------------------------------------------------------------------------
ASMJIT_API virtual size_t _relocCode(void* dst, Ptr base) const;
ASMJIT_API virtual size_t _relocCode(void* dst, Ptr baseAddress) const;
// --------------------------------------------------------------------------
// [Emit]

View File

@@ -814,7 +814,7 @@ InstNode* X86Compiler::newInst(uint32_t code) {
if (inst == NULL)
goto _NoMemory;
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), NULL, 0);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), NULL, 0);
_NoMemory:
setError(kErrorNoHeapMemory);
@@ -832,7 +832,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0) {
Operand* opList = reinterpret_cast<Operand*>(reinterpret_cast<uint8_t*>(inst) + size);
opList[0] = o0;
ASMJIT_ASSERT_UNINITIALIZED(o0);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 1);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 1);
}
_NoMemory:
@@ -853,7 +853,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
opList[1] = o1;
ASMJIT_ASSERT_UNINITIALIZED(o0);
ASMJIT_ASSERT_UNINITIALIZED(o1);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 2);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 2);
}
_NoMemory:
@@ -876,7 +876,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
ASMJIT_ASSERT_UNINITIALIZED(o0);
ASMJIT_ASSERT_UNINITIALIZED(o1);
ASMJIT_ASSERT_UNINITIALIZED(o2);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 3);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 3);
}
_NoMemory:
@@ -901,7 +901,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
ASMJIT_ASSERT_UNINITIALIZED(o1);
ASMJIT_ASSERT_UNINITIALIZED(o2);
ASMJIT_ASSERT_UNINITIALIZED(o3);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 4);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 4);
}
_NoMemory:
@@ -928,7 +928,7 @@ InstNode* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand&
ASMJIT_ASSERT_UNINITIALIZED(o2);
ASMJIT_ASSERT_UNINITIALIZED(o3);
ASMJIT_ASSERT_UNINITIALIZED(o4);
return X86Compiler_newInst(this, inst, code, getOptionsAndReset(), opList, 5);
return X86Compiler_newInst(this, inst, code, getInstOptionsAndReset(), opList, 5);
}
_NoMemory:
@@ -1189,7 +1189,7 @@ Error X86Compiler::setArg(uint32_t argIndex, Var& var) {
if (func == NULL)
return kErrorInvalidArgument;
if (!isVarCreated(var))
if (!isVarValid(var))
return kErrorInvalidState;
VarData* vd = getVd(var);
@@ -1293,45 +1293,38 @@ _OnError:
// ============================================================================
void* X86Compiler::make() {
// Flush global constant pool
X86Compiler_emitConstPool(this, _globalConstPoolLabel, _globalConstPool);
X86Assembler assembler(_runtime, _arch);
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = _logger;
if (logger)
assembler.setLogger(logger);
#endif // !ASMJIT_DISABLE_LOGGER
assembler._features = _features;
if (serialize(assembler) != kErrorOk)
return NULL;
if (assembler.getError() != kErrorOk) {
setError(assembler.getError());
Assembler* assembler = getAssembler();
if (assembler == NULL) {
setError(kErrorNoHeapMemory);
return NULL;
}
void* result = assembler.make();
#if !defined(ASMJIT_DISABLE_LOGGER)
if (logger)
logger->logFormat(kLoggerStyleComment,
"*** COMPILER SUCCESS - Wrote %u bytes, code: %u, trampolines: %u.\n\n",
static_cast<unsigned int>(assembler.getCodeSize()),
static_cast<unsigned int>(assembler.getOffset()),
static_cast<unsigned int>(assembler.getTrampolineSize()));
#endif // !ASMJIT_DISABLE_LOGGER
Error error = serialize(assembler);
if (error != kErrorOk) {
setError(error);
return NULL;
}
void* result = assembler->make();
return result;
}
// ============================================================================
// [asmjit::X86Compiler - Assemble]
// [asmjit::X86Compiler - Assembler]
// ============================================================================
Error X86Compiler::serialize(Assembler& assembler) {
Assembler* X86Compiler::_newAssembler() {
return new(std::nothrow) X86Assembler(_runtime, _arch);
}
// ============================================================================
// [asmjit::X86Compiler - Serialize]
// ============================================================================
Error X86Compiler::serialize(Assembler* assembler) {
// Flush the global constant pool.
X86Compiler_emitConstPool(this, _globalConstPoolLabel, _globalConstPool);
if (_firstNode == NULL)
return kErrorOk;
@@ -1357,7 +1350,7 @@ Error X86Compiler::serialize(Assembler& assembler) {
node = node->getNext();
} while (node != NULL && node->getType() != kNodeTypeFunc);
error = context.serialize(&assembler, start, node);
error = context.serialize(assembler, start, node);
if (error != kErrorOk)
goto _Error;
context.cleanup();

View File

@@ -37,7 +37,7 @@ struct X86VarState;
// ============================================================================
//! X86/X64 variable type.
ASMJIT_ENUM(k86VarType) {
ASMJIT_ENUM(kX86VarType) {
//! Variable is SP-FP (x87).
kX86VarTypeFp32 = kVarTypeFp32,
//! Variable is DP-FP (x87).
@@ -1547,7 +1547,7 @@ ASMJIT_TYPE_ID(X86YmmVar, kX86VarTypeYmm);
//!
//! // Final step - generate code. asmjit::Compiler::serialize() will send all
//! // instructions into Assembler and this ensures generating real machine code.
//! c.serialize(a);
//! c.serialize(&a);
//!
//! // Your function
//! void* fn = a.make();
@@ -2401,11 +2401,17 @@ struct ASMJIT_VCLASS X86Compiler : public Compiler {
ASMJIT_API virtual void* make();
// -------------------------------------------------------------------------
// [Assembler]
// -------------------------------------------------------------------------
ASMJIT_API virtual Assembler* _newAssembler();
// -------------------------------------------------------------------------
// [Serialize]
// -------------------------------------------------------------------------
ASMJIT_API virtual Error serialize(Assembler& assembler);
ASMJIT_API virtual Error serialize(Assembler* assembler);
// -------------------------------------------------------------------------
// [Options]

View File

@@ -891,14 +891,14 @@ void X86Context::emitPopSequence(uint32_t regs) {
if (regs == 0)
return;
int32_t i = static_cast<int32_t>(_regCount.getGp()) - 1;
uint32_t mask = 0x1 << static_cast<uint32_t>(i);
uint32_t i = static_cast<uint32_t>(_regCount.getGp());
uint32_t mask = 0x1 << static_cast<uint32_t>(i - 1);
X86GpReg gpReg(_zsp);
while (i >= 0) {
while (i) {
i--;
if ((regs & mask) != 0)
compiler->emit(kX86InstIdPop, gpReg.setIndex(i));
i--;
mask >>= 1;
}
}
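// Worked example (sketch): with 8 GP registers and regs == 0x28 (bits 3 and
// 5 set), the loop above emits `pop gp5` followed by `pop gp3`, highest index
// first, mirroring the reverse of the corresponding push sequence.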
@@ -931,7 +931,7 @@ void X86Context::emitConvertVarToVar(uint32_t dstType, uint32_t dstIndex, uint32
case kX86VarTypeXmmSs:
if (srcType == kX86VarTypeXmmSd || srcType == kX86VarTypeXmmPd || srcType == kX86VarTypeYmmPd) {
compiler->emit(kX86InstIdCvtsd2ss, x86::xmm(dstIndex), x86::xmm(srcIndex));
break;
return;
}
if (IntUtil::inInterval<uint32_t>(srcType, _kVarTypeIntStart, _kVarTypeIntEnd)) {
@@ -973,8 +973,7 @@ void X86Context::emitMoveVarOnStack(
X86Compiler* compiler = getCompiler();
X86Mem m0(*dst);
X86Reg r0;
X86Reg r1;
X86Reg r0, r1;
uint32_t regSize = compiler->getRegSize();
uint32_t instCode;
@@ -1271,14 +1270,12 @@ void X86Context::emitMoveImmOnStack(uint32_t dstType, const X86Mem* dst, const I
case kVarTypeInt8:
case kVarTypeUInt8:
imm.truncateTo8Bits();
compiler->emit(kX86InstIdMov, mem, imm);
break;
goto _Move32;
case kVarTypeInt16:
case kVarTypeUInt16:
imm.truncateTo16Bits();
compiler->emit(kX86InstIdMov, mem, imm);
break;
goto _Move32;
case kVarTypeInt32:
case kVarTypeUInt32:
@@ -1294,13 +1291,11 @@ _Move64:
uint32_t hi = imm.getUInt32Hi();
// Lo-Part.
imm.truncateTo32Bits();
compiler->emit(kX86InstIdMov, mem, imm);
compiler->emit(kX86InstIdMov, mem, imm.truncateTo32Bits());
mem.adjust(regSize);
// Hi-Part.
mem.adjust(regSize);
imm.setUInt32(hi);
compiler->emit(kX86InstIdMov, mem, imm);
compiler->emit(kX86InstIdMov, mem, imm.setUInt32(hi));
}
else {
compiler->emit(kX86InstIdMov, mem, imm);
@@ -1324,33 +1319,27 @@ _Move64:
if (regSize == 4) {
uint32_t hi = imm.getUInt32Hi();
// Lo-Part.
imm.truncateTo32Bits();
compiler->emit(kX86InstIdMov, mem, imm);
// Hi-Part.
// Lo part.
compiler->emit(kX86InstIdMov, mem, imm.truncateTo32Bits());
mem.adjust(regSize);
imm.setUInt32(hi);
compiler->emit(kX86InstIdMov, mem, imm);
// Zero part - performing AND should generate shorter code, because
// 8-bit immediate can be used instead of 32-bit immediate required
// by MOV instruction.
// Hi part.
compiler->emit(kX86InstIdMov, mem, imm.setUInt32(hi));
mem.adjust(regSize);
imm.setUInt32(0);
compiler->emit(kX86InstIdAnd, mem, imm);
mem.adjust(regSize);
compiler->emit(kX86InstIdAnd, mem, imm);
}
else {
// Lo-Hi parts.
compiler->emit(kX86InstIdMov, mem, imm);
// Zero part.
compiler->emit(kX86InstIdMov, mem, imm.setUInt32(0));
mem.adjust(regSize);
imm.setUInt32(0);
compiler->emit(kX86InstIdAnd, mem, imm);
compiler->emit(kX86InstIdMov, mem, imm);
}
else {
// Lo/Hi parts.
compiler->emit(kX86InstIdMov, mem, imm);
mem.adjust(regSize);
// Zero part.
compiler->emit(kX86InstIdMov, mem, imm.setUInt32(0));
}
break;
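// Worked example (sketch): storing the 64-bit immediate 0x1122334455667788
// with regSize == 4 emits `mov dword [mem], 0x55667788` followed by
// `mov dword [mem+4], 0x11223344` (lo dword first, then hi dword).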
@@ -5345,8 +5334,7 @@ _NextGroup:
goto _Advance;
}
// Remove informative nodes if we are in a middle of instruction
// stream.
// Remove informative nodes if we are in the middle of an instruction stream.
//
// TODO: Shouldn't there be an option for this? Maybe it can be useful
// to stop if there is a comment or something. I'm not sure if it's
@@ -5422,7 +5410,7 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler*
// Create labels on Assembler side.
ASMJIT_PROPAGATE_ERROR(
assembler->_registerIndexedLabels(self->getCompiler()->_targets.getLength()));
assembler->_registerIndexedLabels(self->getCompiler()->_targetList.getLength()));
do {
#if !defined(ASMJIT_DISABLE_LOGGER)
@@ -5506,6 +5494,8 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler*
case kNodeTypeTarget: {
TargetNode* node = static_cast<TargetNode*>(node_);
node->setOffset(assembler->getOffset());
assembler->bind(node->getLabel());
break;
}
@@ -5517,7 +5507,7 @@ static ASMJIT_INLINE Error X86Context_serialize(X86Context* self, X86Assembler*
uint32_t opCount = node->getOpCount();
const Operand* opList = node->getOpList();
assembler->_options = node->getOptions();
assembler->_instOptions = node->getOptions();
const Operand* o0 = &noOperand;
const Operand* o1 = &noOperand;