Reworked CpuInfo (removed X86CpuInfo, added ARM CpuInfo support).

Renamed CodeGen to ExternalTool.
Moved logger constants from asmjit namespace to asmjit::Logger.
Moved AssemblerFeature constants from asmjit namespace to asmjit::Assembler.
Added noexcept to most APIs that are not intended to throw.
Added memory utilities that can read/write to unaligned memory location (ongoing ARM support).
Removed unimplemented instruction scheduler, will be added back when it's working.
This commit is contained in:
kobalicek
2016-03-21 20:04:13 +01:00
parent a5cdf0b44b
commit 185a96a46a
61 changed files with 6281 additions and 5730 deletions

View File

@@ -1,3 +1,23 @@
2016-03-21
CpuInfo has been completely redesigned. It now supports multiple CPUs without having to inherit it to support a specific architecture. Also all CpuInfo-related constants have been moved to CpuInfo.
Change:
```
const X86CpuInfo* cpu = X86CpuInfo::getHost();
cpu->hasFeature(kX86CpuFeatureSSE4_1);
```
to
```
const CpuInfo& cpu = CpuInfo::getHost();
cpu.hasFeature(CpuInfo::kX86FeatureSSE4_1);
```
The whole code-base now uses the `noexcept` keyword to inform API users that these functions won't throw an exception. Moreover, the possibility to throw an exception through `ErrorHandler` has been removed as it seems that nobody has ever used it. `Assembler::emit()` and friends are still not marked as `noexcept` in case this decision is reverted. If there are no complaints, even the `emit()` functions will be marked `noexcept` in the near future.
2015-12-07
----------

View File

@@ -10,6 +10,18 @@ cmake_minimum_required(VERSION 3.1)
# Whether to build a static library (default FALSE).
# set(ASMJIT_STATIC FALSE)
# Whether to build ARM32 backend (TRUE if building for ARM32).
# set(ASMJIT_BUILD_ARM32 FALSE)
# Whether to build ARM64 backend (TRUE if building for ARM64).
# set(ASMJIT_BUILD_ARM64 FALSE)
# Whether to build X86 backend (TRUE if building for X86).
# set(ASMJIT_BUILD_X86 FALSE)
# Whether to build X64 backend (TRUE if building for X64).
# set(ASMJIT_BUILD_X64 FALSE)
# Whether to build tests and samples (default FALSE).
# set(ASMJIT_BUILD_TEST FALSE)
@@ -55,6 +67,7 @@ message("-- [asmjit] ASMJIT_DIR=${ASMJIT_DIR}")
set(ASMJIT_SOURCE_DIR "${ASMJIT_DIR}/src") # Asmjit source directory.
set(ASMJIT_INCLUDE_DIR "${ASMJIT_SOURCE_DIR}") # Asmjit include directory.
set(ASMJIT_CFLAGS) # Asmjit CFLAGS / CXXFLAGS.
set(ASMJIT_DEPS) # Asmjit dependencies (list of libraries) for the linker.
set(ASMJIT_LIBS) # Asmjit dependencies with asmjit included, for consumers.
@@ -104,11 +117,28 @@ if(NOT ASMJIT_EMBED)
list(INSERT ASMJIT_LIBS 0 asmjit)
endif()
set(ASMJIT_PRIVATE_CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG})
set(ASMJIT_PRIVATE_CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL})
if(ASMJIT_BUILD_ARM32)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_ARM32")
endif()
if(ASMJIT_BUILD_ARM64)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_ARM64")
endif()
if(ASMJIT_BUILD_X86)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_X86")
endif()
if(ASMJIT_BUILD_X64)
List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_X64")
endif()
set(ASMJIT_PRIVATE_CFLAGS_DBG ${ASMJIT_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG})
set(ASMJIT_PRIVATE_CFLAGS_REL ${ASMJIT_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL})
message("-- [asmjit] ASMJIT_DEPS=${ASMJIT_DEPS}")
message("-- [asmjit] ASMJIT_LIBS=${ASMJIT_LIBS}")
message("-- [asmjit] ASMJIT_CFLAGS=${ASMJIT_CFLAGS}")
# =============================================================================
# [AsmJit - Macros]
@@ -168,6 +198,8 @@ asmjit_add_source(ASMJIT_SRC asmjit
base.h
build.h
host.h
arm.h
x86.h
)
@@ -193,6 +225,8 @@ asmjit_add_source(ASMJIT_SRC asmjit/base
logger.h
operand.cpp
operand.h
podvector.cpp
podvector.h
runtime.cpp
runtime.h
utils.cpp
@@ -204,6 +238,18 @@ asmjit_add_source(ASMJIT_SRC asmjit/base
zone.h
)
if(0)
asmjit_add_source(ASMJIT_SRC asmjit/arm
armassembler.cpp
armassembler.h
arminst.cpp
arminst.h
armoperand.cpp
armoperand_regs.cpp
armoperand.h
)
endif()
asmjit_add_source(ASMJIT_SRC asmjit/x86
x86assembler.cpp
x86assembler.h
@@ -213,15 +259,11 @@ asmjit_add_source(ASMJIT_SRC asmjit/x86
x86compilercontext_p.h
x86compilerfunc.cpp
x86compilerfunc.h
x86cpuinfo.cpp
x86cpuinfo.h
x86inst.cpp
x86inst.h
x86operand.cpp
x86operand_regs.cpp
x86operand.h
x86scheduler.cpp
x86scheduler_p.h
)
# =============================================================================
@@ -252,7 +294,7 @@ if(NOT ASMJIT_EMBED)
# Add `asmjit` tests and samples.
if(ASMJIT_BUILD_TEST)
set(ASMJIT_TEST_SRC "")
set(ASMJIT_TEST_CFLAGS ${ASMJIT_D}ASMJIT_TEST ${ASMJIT_D}ASMJIT_EMBED)
set(ASMJIT_TEST_CFLAGS ${ASMJIT_CFLAGS} ${ASMJIT_D}ASMJIT_TEST ${ASMJIT_D}ASMJIT_EMBED)
asmjit_add_source(ASMJIT_TEST_SRC test asmjit_test_unit.cpp broken.cpp broken.h)
add_executable(asmjit_test_unit ${ASMJIT_SRC} ${ASMJIT_TEST_SRC})
@@ -273,6 +315,7 @@ if(NOT ASMJIT_EMBED)
foreach(_target asmjit_bench_x86 asmjit_test_opcode asmjit_test_x86)
add_executable(${_target} "src/test/${_target}.cpp")
target_compile_options(${_target} PRIVATE ${ASMJIT_CFLAGS})
target_link_libraries(${_target} ${ASMJIT_LIBS})
endforeach()
endif()

View File

@@ -37,15 +37,16 @@ Supported Environments
### C++ Compilers
* BorlandC++ (not tested regularly)
* CLang (tested by Travis-CI)
* Clang (tested by Travis-CI)
* CodeGear (including BorlandC++, not tested regularly)
* GCC (tested by Travis-CI)
* MinGW (tested manually)
* MSVC (tested manually, at least Visual Studio 2003 required)
* MSVC (tested manually, at least VS2003 is required)
* Other compilers require some testing and support in `asmjit/build.h`
### Backends
* ARM (work-in-progress)
* X86 (tested by Travis-CI)
* X64 (tested by Travis-CI)
@@ -54,9 +55,10 @@ Project Organization
* `/` - Project root
* `src` - Source code
* `asmjit` - Public header files (always include from here)
* `base` - Base files, used by AsmJit and all backends
* `x86` - X86/X64 specific files, used only by X86/X64 backend
* `asmjit` - Source code and headers (always point your include path here)
* `base` - Generic API and interfaces, used by all backends
* `arm` - ARM/ARM64 specific API, used only by ARM and ARM64 backends
* `x86` - X86/X64 specific API, used only by X86 and X64 backends
* `test` - Unit and integration tests (don't embed in your project)
* `tools` - Tools used for configuring, documenting and generating files
@@ -93,9 +95,11 @@ AsmJit is designed to be easy embeddable in any kind project. However, it has so
### Architectures
* `ASMJIT_BUILD_X86` - Always build x86 backend regardless of host architecture.
* `ASMJIT_BUILD_X64` - Always build x64 backend regardless of host architecture.
* `ASMJIT_BUILD_HOST` - Always build host backend, if only `ASMJIT_BUILD_HOST` is used only the host architecture detected at compile-time will be included.
* `ASMJIT_BUILD_ARM` - Build ARM backend.
* `ASMJIT_BUILD_ARM64` - Build ARM64 backend.
* `ASMJIT_BUILD_X86` - Build x86 backend.
* `ASMJIT_BUILD_X64` - Build x64 backend.
* `ASMJIT_BUILD_HOST` - Build host backend; if only `ASMJIT_BUILD_HOST` is used, only the host architecture detected at compile-time will be included.
* By default only `ASMJIT_BUILD_HOST` is defined.

20
src/asmjit/arm.h Normal file
View File

@@ -0,0 +1,20 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_ARM_H
#define _ASMJIT_ARM_H
// [Dependencies - AsmJit]
#include "./base.h"
#include "./arm/armassembler.h"
#include "./arm/armcompiler.h"
#include "./arm/arminst.h"
#include "./arm/armoperand.h"
// [Guard]
#endif // _ASMJIT_ARM_H

View File

@@ -81,7 +81,7 @@
//!
//! List of the most useful code-generation and operand classes:
//! - \ref asmjit::Assembler - Low-level code-generation.
//! - \ref asmjit::CodeGen - Astract code-generation that serializes to `Assembler`:
//! - \ref asmjit::ExternalTool - An external tool that can serialize to `Assembler`:
//! - \ref asmjit::Compiler - High-level code-generation.
//! - \ref asmjit::Runtime - Describes where the code is stored and how it's executed:
//! - \ref asmjit::HostRuntime - Runtime that runs on the host machine:
@@ -307,69 +307,47 @@
//! the host X86/X64 processor. AsmJit contains utilities that can get the most
//! important information related to the features supported by the CPU and the
//! host operating system, in addition to host processor name and number of
//! cores. Class `X86CpuInfo` extends `CpuInfo` and provides functionality
//! specific to X86 and X64.
//! cores. Class `CpuInfo` provides generic information about a host or target
//! processor and also contains X86/X64-specific information.
//!
//! By default AsmJit queries the CPU information after the library is loaded
//! and the queried information is reused by all instances of `JitRuntime`.
//! The global instance of `X86CpuInfo` can't be changed, because it will affect
//! The global instance of `CpuInfo` can't be changed, because it will affect
//! the code generation of all `Runtime`s. If there is a need to have a
//! specific CPU information which contains modified features or processor
//! vendor it's possible by creating a new instance of `X86CpuInfo` and setting
//! up its members. `X86CpuUtil::detect` can be used to detect CPU features into
//! an existing `X86CpuInfo` instance - it may become handly if only one property
//! has to be turned on/off.
//!
//! If the high-level interface `X86CpuInfo` offers is not enough there is also
//! `X86CpuUtil::callCpuId` helper that can be used to call CPUID instruction
//! with a given parameters and to consume the output.
//! vendor it's possible by creating a new instance of the `CpuInfo` and setting
//! up its members.
//!
//! Cpu detection is important when generating a JIT code that may or may not
//! use certain CPU features. For example there used to be a SSE/SSE2 detection
//! in the past and today there is often AVX/AVX2 detection.
//!
//! The example below shows how to detect SSE4.1:
//! The example below shows how to detect a SSE4.1 instruction set:
//!
//! ~~~
//! using namespace asmjit;
//!
//! // Get `X86CpuInfo` global instance.
//! const X86CpuInfo* cpuInfo = X86CpuInfo::getHost();
//! const CpuInfo& cpuInfo = CpuInfo::getHost();
//!
//! if (cpuInfo->hasFeature(kX86CpuFeatureSSE4_1)) {
//! if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE4_1)) {
//! // Processor has SSE4.1.
//! }
//! else if (cpuInfo->hasFeature(kX86CpuFeatureSSE2)) {
//! else if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE2)) {
//! // Processor doesn't have SSE4.1, but has SSE2.
//! }
//! else {
//! // Processor is archaic; it's a wonder AsmJit works here!
//! }
//! ~~~
//!
//! The next example shows how to call `CPUID` directly:
//!
//! ~~~
//! using namespace asmjit;
//!
//! // Call CPUID, first two arguments are passed in EAX/ECX.
//! X86CpuId out;
//! X86CpuUtil::callCpuId(0, 0, &out);
//!
//! // If EAX argument is 0, EBX, ECX and EDX registers are filled with a CPU vendor.
//! char cpuVendor[13];
//! ::memcpy(cpuVendor, &out.ebx, 4);
//! ::memcpy(cpuVendor + 4, &out.edx, 4);
//! ::memcpy(cpuVendor + 8, &out.ecx, 4);
//! vendor[12] = '\0';
//!
//! // Print the CPU vendor retrieved from CPUID.
//! ::printf("CPU Vendor: %s\n", cpuVendor);
//! ~~~
// [Dependencies - Base]
#include "./base.h"
// [Dependencies - ARM/ARM64]
#if defined(ASMJIT_BUILD_ARM32) || defined(ASMJIT_BUILD_ARM64)
#include "./arm.h"
#endif // ASMJIT_BUILD_ARM32 || ASMJIT_BUILD_ARM64
// [Dependencies - X86/X64]
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
#include "./x86.h"

View File

@@ -18,6 +18,7 @@
#include "./base/globals.h"
#include "./base/logger.h"
#include "./base/operand.h"
#include "./base/podvector.h"
#include "./base/runtime.h"
#include "./base/utils.h"
#include "./base/vectypes.h"

View File

@@ -24,29 +24,29 @@ namespace asmjit {
// [asmjit::ErrorHandler]
// ============================================================================
ErrorHandler::ErrorHandler() {}
ErrorHandler::~ErrorHandler() {}
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
ErrorHandler* ErrorHandler::addRef() const {
ErrorHandler* ErrorHandler::addRef() const noexcept {
return const_cast<ErrorHandler*>(this);
}
void ErrorHandler::release() {}
void ErrorHandler::release() noexcept {}
// ============================================================================
// [asmjit::CodeGen]
// [asmjit::ExternalTool]
// ============================================================================
CodeGen::CodeGen()
ExternalTool::ExternalTool() noexcept
: _assembler(nullptr),
_hlId(0),
_exId(0),
_arch(kArchNone),
_regSize(0),
_finalized(false),
_reserved(0),
_lastError(kErrorNotInitialized) {}
CodeGen::~CodeGen() {}
ExternalTool::~ExternalTool() noexcept {}
Error CodeGen::setLastError(Error error, const char* message) {
Error ExternalTool::setLastError(Error error, const char* message) noexcept {
// Special case, reset the last error if the error is `kErrorOk`.
if (error == kErrorOk) {
_lastError = kErrorOk;
@@ -63,7 +63,7 @@ Error CodeGen::setLastError(Error error, const char* message) {
// Logging is skipped if the error is handled by `ErrorHandler`.
ErrorHandler* eh = assembler->getErrorHandler();
ASMJIT_TLOG("[ERROR (CodeGen)] %s (0x%0.8u) %s\n", message,
ASMJIT_TLOG("[ERROR (ExternalTool)] %s (0x%0.8u) %s\n", message,
static_cast<unsigned int>(error),
!eh ? "(Possibly unhandled?)" : "");
@@ -73,8 +73,8 @@ Error CodeGen::setLastError(Error error, const char* message) {
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = assembler->getLogger();
if (logger != nullptr)
logger->logFormat(kLoggerStyleComment,
"*** ERROR (CodeGen): %s (0x%0.8u).\n", message,
logger->logFormat(Logger::kStyleComment,
"*** ERROR (ExternalTool): %s (0x%0.8u).\n", message,
static_cast<unsigned int>(error));
#endif // !ASMJIT_DISABLE_LOGGER
@@ -89,18 +89,18 @@ Error CodeGen::setLastError(Error error, const char* message) {
// [asmjit::Assembler - Construction / Destruction]
// ============================================================================
Assembler::Assembler(Runtime* runtime)
Assembler::Assembler(Runtime* runtime) noexcept
: _runtime(runtime),
_logger(nullptr),
_errorHandler(nullptr),
_arch(kArchNone),
_regSize(0),
_reserved(0),
_features(Utils::mask(kAssemblerFeatureOptimizedAlign)),
_asmOptions(0),
_instOptions(0),
_lastError(runtime ? kErrorOk : kErrorNotInitialized),
_hlIdGenerator(0),
_hlAttachedCount(0),
_exIdGenerator(0),
_exCountAttached(0),
_zoneAllocator(8192 - Zone::kZoneOverhead),
_buffer(nullptr),
_end(nullptr),
@@ -108,10 +108,10 @@ Assembler::Assembler(Runtime* runtime)
_trampolinesSize(0),
_comment(nullptr),
_unusedLinks(nullptr),
_labelList(),
_relocList() {}
_labels(),
_relocations() {}
Assembler::~Assembler() {
Assembler::~Assembler() noexcept {
reset(true);
if (_errorHandler != nullptr)
@@ -122,12 +122,12 @@ Assembler::~Assembler() {
// [asmjit::Assembler - Reset]
// ============================================================================
void Assembler::reset(bool releaseMemory) {
_features = Utils::mask(kAssemblerFeatureOptimizedAlign);
void Assembler::reset(bool releaseMemory) noexcept {
_asmOptions = 0;
_instOptions = 0;
_lastError = kErrorOk;
_hlIdGenerator = 0;
_hlAttachedCount = 0;
_exIdGenerator = 0;
_exCountAttached = 0;
_zoneAllocator.reset(releaseMemory);
@@ -143,15 +143,16 @@ void Assembler::reset(bool releaseMemory) {
_comment = nullptr;
_unusedLinks = nullptr;
_labelList.reset(releaseMemory);
_relocList.reset(releaseMemory);
_sections.reset(releaseMemory);
_labels.reset(releaseMemory);
_relocations.reset(releaseMemory);
}
// ============================================================================
// [asmjit::Assembler - Logging & Error Handling]
// ============================================================================
Error Assembler::setLastError(Error error, const char* message) {
Error Assembler::setLastError(Error error, const char* message) noexcept {
// Special case, reset the last error if the error is `kErrorOk`.
if (error == kErrorOk) {
_lastError = kErrorOk;
@@ -173,7 +174,7 @@ Error Assembler::setLastError(Error error, const char* message) {
#if !defined(ASMJIT_DISABLE_LOGGER)
Logger* logger = _logger;
if (logger != nullptr)
logger->logFormat(kLoggerStyleComment,
logger->logFormat(Logger::kStyleComment,
"*** ERROR (Assembler): %s (0x%0.8u).\n", message,
static_cast<unsigned int>(error));
#endif // !ASMJIT_DISABLE_LOGGER
@@ -185,7 +186,7 @@ Error Assembler::setLastError(Error error, const char* message) {
return error;
}
Error Assembler::setErrorHandler(ErrorHandler* handler) {
Error Assembler::setErrorHandler(ErrorHandler* handler) noexcept {
ErrorHandler* oldHandler = _errorHandler;
if (oldHandler != nullptr)
@@ -202,7 +203,7 @@ Error Assembler::setErrorHandler(ErrorHandler* handler) {
// [asmjit::Assembler - Buffer]
// ============================================================================
Error Assembler::_grow(size_t n) {
Error Assembler::_grow(size_t n) noexcept {
size_t capacity = getCapacity();
size_t after = getOffset() + n;
@@ -237,7 +238,7 @@ Error Assembler::_grow(size_t n) {
return _reserve(capacity);
}
Error Assembler::_reserve(size_t n) {
Error Assembler::_reserve(size_t n) noexcept {
size_t capacity = getCapacity();
if (n <= capacity)
return kErrorOk;
@@ -264,16 +265,16 @@ Error Assembler::_reserve(size_t n) {
// [asmjit::Assembler - Label]
// ============================================================================
Error Assembler::_newLabelId() {
Error Assembler::_newLabelId() noexcept {
LabelData* data = _zoneAllocator.allocT<LabelData>();
data->offset = -1;
data->links = nullptr;
data->hlId = 0;
data->hlData = nullptr;
data->exId = 0;
data->exData = nullptr;
uint32_t id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labelList.getLength()));
Error error = _labelList.append(data);
uint32_t id = OperandUtil::makeLabelId(static_cast<uint32_t>(_labels.getLength()));
Error error = _labels.append(data);
if (error != kErrorOk) {
setLastError(kErrorNoHeapMemory);
@@ -283,7 +284,7 @@ Error Assembler::_newLabelId() {
return id;
}
LabelLink* Assembler::_newLabelLink() {
LabelLink* Assembler::_newLabelLink() noexcept {
LabelLink* link = _unusedLinks;
if (link) {
@@ -303,7 +304,7 @@ LabelLink* Assembler::_newLabelLink() {
return link;
}
Error Assembler::bind(const Label& label) {
Error Assembler::bind(const Label& label) noexcept {
// Get label data based on label id.
uint32_t index = label.getId();
LabelData* data = getLabelData(index);
@@ -318,11 +319,11 @@ Error Assembler::bind(const Label& label) {
sb.setFormat("L%u:", index);
size_t binSize = 0;
if ((_logger->getOptions() & (1 << kLoggerOptionBinaryForm)) == 0)
if (!_logger->hasOption(Logger::kOptionBinaryForm))
binSize = kInvalidIndex;
LogUtil::formatLine(sb, nullptr, binSize, 0, 0, _comment);
_logger->logString(kLoggerStyleLabel, sb.getData(), sb.getLength());
_logger->logString(Logger::kStyleLabel, sb.getData(), sb.getLength());
}
#endif // !ASMJIT_DISABLE_LOGGER
@@ -338,7 +339,7 @@ Error Assembler::bind(const Label& label) {
if (link->relocId != -1) {
// Handle RelocData - We have to update RelocData information instead of
// patching the displacement in LabelData.
_relocList[link->relocId].data += static_cast<Ptr>(pos);
_relocations[link->relocId].data += static_cast<Ptr>(pos);
}
else {
// Not using relocId, this means that we are overwriting a real
@@ -347,16 +348,16 @@ Error Assembler::bind(const Label& label) {
static_cast<intptr_t>(pos) - offset + link->displacement);
// Size of the value we are going to patch. Only BYTE/DWORD is allowed.
uint32_t size = getByteAt(offset);
uint32_t size = readU8At(offset);
ASMJIT_ASSERT(size == 1 || size == 4);
if (size == 4) {
setInt32At(offset, patchedValue);
writeI32At(offset, patchedValue);
}
else {
ASMJIT_ASSERT(size == 1);
if (Utils::isInt8(patchedValue))
setByteAt(offset, static_cast<uint8_t>(patchedValue & 0xFF));
writeU8At(offset, static_cast<uint32_t>(patchedValue) & 0xFF);
else
error = kErrorIllegalDisplacement;
}
@@ -391,7 +392,7 @@ Error Assembler::bind(const Label& label) {
// [asmjit::Assembler - Embed]
// ============================================================================
Error Assembler::embed(const void* data, uint32_t size) {
Error Assembler::embed(const void* data, uint32_t size) noexcept {
if (getRemainingSpace() < size) {
Error error = _grow(size);
if (error != kErrorOk)
@@ -404,7 +405,7 @@ Error Assembler::embed(const void* data, uint32_t size) {
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logBinary(kLoggerStyleData, data, size);
_logger->logBinary(Logger::kStyleData, data, size);
#endif // !ASMJIT_DISABLE_LOGGER
return kErrorOk;
@@ -414,7 +415,7 @@ Error Assembler::embed(const void* data, uint32_t size) {
// [asmjit::Assembler - Reloc]
// ============================================================================
size_t Assembler::relocCode(void* dst, Ptr baseAddress) const {
size_t Assembler::relocCode(void* dst, Ptr baseAddress) const noexcept {
if (baseAddress == kNoBaseAddress)
baseAddress = static_cast<Ptr>((uintptr_t)dst);
return _relocCode(dst, baseAddress);
@@ -424,7 +425,7 @@ size_t Assembler::relocCode(void* dst, Ptr baseAddress) const {
// [asmjit::Assembler - Make]
// ============================================================================
void* Assembler::make() {
void* Assembler::make() noexcept {
// Do nothing on error condition or if no instruction has been emitted.
if (_lastError != kErrorOk || getCodeSize() == 0)
return nullptr;

View File

@@ -12,6 +12,7 @@
#include "../base/containers.h"
#include "../base/logger.h"
#include "../base/operand.h"
#include "../base/podvector.h"
#include "../base/runtime.h"
#include "../base/zone.h"
@@ -23,48 +24,6 @@ namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::AssemblerFeatures]
// ============================================================================
//! Features of \ref Assembler.
ASMJIT_ENUM(AssemblerFeatures) {
//! Emit optimized code-alignment sequences (`Assembler` and `Compiler`).
//!
//! Default `true`.
//!
//! X86/X64
//! -------
//!
//! Default align sequence used by X86/X64 architecture is one-byte 0x90
//! opcode that is mostly shown by disassemblers as nop. However there are
//! more optimized align sequences for 2-11 bytes that may execute faster.
//! If this feature is enabled asmjit will generate specialized sequences
//! for alignment between 1 to 11 bytes. Also when `X86Compiler` is used,
//! it can add REX prefixes into the code to make some instructions greater
//! so no alignment sequence is needed.
kAssemblerFeatureOptimizedAlign = 0,
//! Emit jump-prediction hints (`Assembler` and `Compiler`).
//!
//! Default `false`.
//!
//! X86/X64
//! -------
//!
//! Jump prediction is usually based on the direction of the jump. If the
//! jump is backward it is usually predicted as taken; and if the jump is
//! forward it is usually predicted as not-taken. The reason is that loops
//! generally use backward jumps and conditions usually use forward jumps.
//! However this behavior can be overridden by using instruction prefixes.
//! If this option is enabled these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that
//! used to take into consideration prediction hints was P4. Newer processors
//! implement heuristics for branch prediction that ignores any static hints.
kAssemblerFeaturePredictedJumps = 1
};
// ============================================================================
// [asmjit::InstId]
// ============================================================================
@@ -230,13 +189,12 @@ struct LabelData {
//! Label links chain.
LabelLink* links;
//! An ID of a code-generator that created this label.
uint64_t hlId;
//! Pointer to the data the code-generator associated with the label.
void* hlData;
//! External tool ID, if linked to any.
uint64_t exId;
//! Pointer to a data that `ExternalTool` associated with the label.
void* exData;
};
// ============================================================================
// [asmjit::RelocData]
// ============================================================================
@@ -271,7 +229,7 @@ struct RelocData {
//! Error handler.
//!
//! Error handler can be used to override the default behavior of `CodeGen`
//! Error handler can be used to override the default behavior of `Assembler`
//! error handling and propagation. See `handleError()` on how to override it.
//!
//! Please note that `addRef` and `release` functions are used, but there is
@@ -283,9 +241,9 @@ struct ASMJIT_VIRTAPI ErrorHandler {
// --------------------------------------------------------------------------
//! Create a new `ErrorHandler` instance.
ASMJIT_API ErrorHandler();
ASMJIT_API ErrorHandler() noexcept;
//! Destroy the `ErrorHandler` instance.
ASMJIT_API virtual ~ErrorHandler();
ASMJIT_API virtual ~ErrorHandler() noexcept;
// --------------------------------------------------------------------------
// [AddRef / Release]
@@ -298,13 +256,13 @@ struct ASMJIT_VIRTAPI ErrorHandler {
//! multiple `ErrorHandler` instances are used by a different code generators
//! you may provide your own functionality for reference counting. In that
//! case `addRef()` and `release()` functions should be overridden.
ASMJIT_API virtual ErrorHandler* addRef() const;
ASMJIT_API virtual ErrorHandler* addRef() const noexcept;
//! Release this error handler.
//!
//! \note This member function is provided for convenience. See `addRef()`
//! for more detailed information related to reference counting.
ASMJIT_API virtual void release();
ASMJIT_API virtual void release() noexcept;
// --------------------------------------------------------------------------
// [Handle Error]
@@ -312,50 +270,40 @@ struct ASMJIT_VIRTAPI ErrorHandler {
//! Error handler (pure).
//!
//! Error handler is called when an error happened. An error can happen in
//! many places, but error handler is mostly used by `Assembler` and
//! `Compiler` classes to report anything that may cause incorrect code
//! generation. There are multiple ways how the error handler can be used
//! and each has it's pros/cons.
//! Error handler is called after an error happened. An error can happen in
//! many places, but the error handler is mostly used by `Assembler` to report
//! any fatal problem. There are multiple ways the error handler can be
//! used:
//!
//! AsmJit library doesn't use exceptions and can be compiled with or without
//! exception handling support. Even if the AsmJit library is compiled without
//! exceptions it is exception-safe and handleError() can report an incoming
//! error by throwing an exception of any type. It's guaranteed that the
//! exception won't be catched by AsmJit and will be propagated to the code
//! calling AsmJit `Assembler` or `Compiler` methods. Alternative to
//! throwing an exception is using `setjmp()` and `longjmp()` pair available
//! in the standard C library.
//! 1. Returning `true` or `false` from `handleError()`. If `true` is
//! returned it means that error was reported and AsmJit can continue
//! with code-generation. However, `false` reports to AsmJit that the
//! error cannot be handled, in such case it stores the error in
//! `Assembler` and puts it into an error state. The error is accessible
through `Assembler::getLastError()`. Returning `false` is the default
behavior when no error handler is used.
//!
//! If the exception or setjmp() / longjmp() mechanism is used, the state of
//! the `BaseAssember` or `Compiler` is unchanged and if it's possible the
//! execution (instruction serialization) can continue. However if the error
//! happened during any phase that translates or modifies the stored code
//! (for example relocation done by `Assembler` or analysis/translation
//! done by `Compiler`) the execution can't continue and the error will
//! be also stored in `Assembler` or `Compiler`.
//!
//! Finally, if no exceptions nor setjmp() / longjmp() mechanisms were used,
//! you can still implement a compatible handling by returning from your
//! error handler. Returning `true` means that error was reported and AsmJit
//! should continue execution, but `false` sets the error immediately to the
//! `Assembler` or `Compiler` and execution shouldn't continue (this is the
//! default behavior in case no error handler is used).
virtual bool handleError(Error code, const char* message, void* origin) = 0;
//! 2. AsmJit doesn't use exception handling so your error handler should
//! not throw an exception either; however, it's possible to use plain old C's
//! `setjmp()` and `longjmp()`. Asmjit always puts `Assembler` and
//! `Compiler` to a consistent state before calling the `handleError()`,
//! so you can use `longjmp()` to leave the code-generation if an error
//! happened.
virtual bool handleError(Error code, const char* message, void* origin) noexcept = 0;
};
// ============================================================================
// [asmjit::CodeGen]
// [asmjit::ExternalTool]
// ============================================================================
//! Interface to implement an external code generator (i.e. `Compiler`).
struct ASMJIT_VIRTAPI CodeGen {
//! An external tool (i.e. `Stream` or `Compiler`) that can serialize to `Assembler`
struct ASMJIT_VIRTAPI ExternalTool {
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API CodeGen();
ASMJIT_API virtual ~CodeGen();
ASMJIT_API ExternalTool() noexcept;
ASMJIT_API virtual ~ExternalTool() noexcept;
// --------------------------------------------------------------------------
// [Attach / Reset]
@@ -364,10 +312,10 @@ struct ASMJIT_VIRTAPI CodeGen {
//! \internal
//!
//! Called to attach this code generator to the `assembler`.
virtual Error attach(Assembler* assembler) = 0;
virtual Error attach(Assembler* assembler) noexcept = 0;
//! Reset the code-generator (also detaches if attached).
virtual void reset(bool releaseMemory) = 0;
virtual void reset(bool releaseMemory) noexcept = 0;
// --------------------------------------------------------------------------
// [Finalize]
@@ -377,45 +325,45 @@ struct ASMJIT_VIRTAPI CodeGen {
//!
//! The finalization has two passes:
//! - serializes code to the attached assembler.
//! - resets the `CodeGen` (detaching from the `Assembler as well) so it can
//! be reused or destroyed.
virtual Error finalize() = 0;
//! - resets the `ExternalTool` (detaching from the `Assembler` as well) so
//! it can be reused or destroyed.
virtual Error finalize() noexcept = 0;
// --------------------------------------------------------------------------
// [Runtime / Assembler]
// --------------------------------------------------------------------------
//! Get the `Runtime` instance that is associated with the code-generator.
ASMJIT_INLINE Runtime* getRuntime() const { return _runtime; }
ASMJIT_INLINE Runtime* getRuntime() const noexcept { return _runtime; }
//! Get the `Assembler` instance that is associated with the code-generator.
ASMJIT_INLINE Assembler* getAssembler() const { return _assembler; }
ASMJIT_INLINE Assembler* getAssembler() const noexcept { return _assembler; }
// --------------------------------------------------------------------------
// [Architecture]
// --------------------------------------------------------------------------
//! Get the target architecture.
ASMJIT_INLINE uint32_t getArch() const { return _arch; }
ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; }
//! Get the default register size - 4 or 8 bytes, depends on the target.
ASMJIT_INLINE uint32_t getRegSize() const { return _regSize; }
ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regSize; }
// --------------------------------------------------------------------------
// [Error Handling]
// --------------------------------------------------------------------------
//! Get the last error code.
ASMJIT_INLINE Error getLastError() const { return _lastError; }
ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; }
//! Set the last error code and propagate it through the error handler.
ASMJIT_API Error setLastError(Error error, const char* message = nullptr);
ASMJIT_API Error setLastError(Error error, const char* message = nullptr) noexcept;
//! Clear the last error code.
ASMJIT_INLINE void resetLastError() { _lastError = kErrorOk; }
ASMJIT_INLINE void resetLastError() noexcept { _lastError = kErrorOk; }
// --------------------------------------------------------------------------
// [CodeGen]
// [ID]
// --------------------------------------------------------------------------
//! Get the code-generator ID, provided by `Assembler` when attached to it.
ASMJIT_INLINE uint64_t getHLId() const { return _hlId; }
//! Get the tool ID, provided by `Assembler` when attached to it.
ASMJIT_INLINE uint64_t getExId() const noexcept { return _exId; }
// --------------------------------------------------------------------------
// [Members]
@@ -426,17 +374,17 @@ struct ASMJIT_VIRTAPI CodeGen {
//! Associated assembler.
Assembler* _assembler;
//! High-level ID, provided by `Assembler`.
//! `ExternalTool` ID, provided by `Assembler`.
//!
//! If multiple high-evel code generators are associated with a single
//! assembler the `_hlId` member can be used to distinguish between them and
//! assembler the `_exId` member can be used to distinguish between them and
//! to provide a mechanism to check whether the high-level code generator is
//! accessing the resource it really owns.
uint64_t _hlId;
uint64_t _exId;
//! Target architecture ID.
//! Target's architecture ID.
uint8_t _arch;
//! Target architecture GP register size in bytes (4 or 8).
//! Target's architecture GP register size in bytes (4 or 8).
uint8_t _regSize;
//! The code generator has been finalized.
uint8_t _finalized;
@@ -452,21 +400,97 @@ struct ASMJIT_VIRTAPI CodeGen {
//! Base assembler.
//!
//! This class implements the base interface that is used by architecture
//! This class implements a base interface that is used by architecture
//! specific assemblers.
//!
//! \sa Compiler.
struct ASMJIT_VIRTAPI Assembler {
ASMJIT_NO_COPY(Assembler)
// --------------------------------------------------------------------------
// [Options]
// --------------------------------------------------------------------------
//! Assembler options.
ASMJIT_ENUM(Options) {
//! Emit optimized code-alignment sequences (`Assembler` and `Compiler`).
//!
//! Default `true`.
//!
//! X86/X64 Specific
//! ----------------
//!
//! Default align sequence used by X86/X64 architecture is one-byte 0x90
//! opcode that is mostly shown by disassemblers as nop. However there are
//! more optimized align sequences for 2-11 bytes that may execute faster.
//! If this feature is enabled asmjit will generate specialized sequences
//! for alignment between 1 to 11 bytes. Also when `X86Compiler` is used,
//! it can add REX prefixes into the code to make some instructions greater
//! so no alignment sequence is needed.
kOptionOptimizedAlign = 0,
//! Emit jump-prediction hints (`Assembler` and `Compiler`).
//!
//! Default `false`.
//!
//! X86/X64 Specific
//! ----------------
//!
//! Jump prediction is usually based on the direction of the jump. If the
//! jump is backward it is usually predicted as taken; and if the jump is
//! forward it is usually predicted as not-taken. The reason is that loops
//! generally use backward jumps and conditions usually use forward jumps.
//! However this behavior can be overridden by using instruction prefixes.
//! If this option is enabled these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that
//! used to take into consideration prediction hints was P4. Newer processors
//! implement heuristics for branch prediction that ignores any static hints.
kOptionPredictedJumps = 1
};
// --------------------------------------------------------------------------
// [Buffer]
// --------------------------------------------------------------------------
//! Code or data buffer.
struct Buffer {
//! Code data.
uint8_t* data;
//! Total length of `data` in bytes.
size_t capacity;
//! Number of bytes of `data` used.
size_t length;
//! Current offset (assembler's cursor) in bytes.
size_t offset;
};
// --------------------------------------------------------------------------
// [Section]
// --------------------------------------------------------------------------
//! Code or data section.
struct Section {
//! Section id.
uint32_t id;
//! Section flags.
uint32_t flags;
//! Section name (limited to 35 characters, PE allows max 8 chars).
char name[36];
//! Section alignment requirements (0 if no requirements).
uint32_t alignment;
//! Section content.
Buffer content;
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new `Assembler` instance.
ASMJIT_API Assembler(Runtime* runtime);
ASMJIT_API Assembler(Runtime* runtime) noexcept;
//! Destroy the `Assembler` instance.
ASMJIT_API virtual ~Assembler();
ASMJIT_API virtual ~Assembler() noexcept;
// --------------------------------------------------------------------------
// [Reset]
@@ -475,7 +499,7 @@ struct ASMJIT_VIRTAPI Assembler {
//! Reset the assembler.
//!
//! If `releaseMemory` is true all buffers will be released to the system.
ASMJIT_API void reset(bool releaseMemory = false);
ASMJIT_API void reset(bool releaseMemory = false) noexcept;
// --------------------------------------------------------------------------
// [Runtime]
@@ -484,16 +508,16 @@ struct ASMJIT_VIRTAPI Assembler {
//! Get the runtime associated with the assembler.
//!
//! NOTE: Runtime is persistent across `reset()` calls.
ASMJIT_INLINE Runtime* getRuntime() const { return _runtime; }
ASMJIT_INLINE Runtime* getRuntime() const noexcept { return _runtime; }
// --------------------------------------------------------------------------
// [Architecture]
// --------------------------------------------------------------------------
//! Get the target architecture.
ASMJIT_INLINE uint32_t getArch() const { return _arch; }
ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; }
//! Get the default register size - 4 or 8 bytes, depends on the target.
ASMJIT_INLINE uint32_t getRegSize() const { return _regSize; }
ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regSize; }
// --------------------------------------------------------------------------
// [Logging]
@@ -501,11 +525,11 @@ struct ASMJIT_VIRTAPI Assembler {
#if !defined(ASMJIT_DISABLE_LOGGER)
//! Get whether the assembler has a logger.
ASMJIT_INLINE bool hasLogger() const { return _logger != nullptr; }
ASMJIT_INLINE bool hasLogger() const noexcept { return _logger != nullptr; }
//! Get the logger.
ASMJIT_INLINE Logger* getLogger() const { return _logger; }
ASMJIT_INLINE Logger* getLogger() const noexcept { return _logger; }
//! Set the logger to `logger`.
ASMJIT_INLINE void setLogger(Logger* logger) { _logger = logger; }
ASMJIT_INLINE void setLogger(Logger* logger) noexcept { _logger = logger; }
#endif // !ASMJIT_DISABLE_LOGGER
// --------------------------------------------------------------------------
@@ -513,71 +537,70 @@ struct ASMJIT_VIRTAPI Assembler {
// --------------------------------------------------------------------------
//! Get the error handler.
ASMJIT_INLINE ErrorHandler* getErrorHandler() const { return _errorHandler; }
ASMJIT_INLINE ErrorHandler* getErrorHandler() const noexcept { return _errorHandler; }
//! Set the error handler.
ASMJIT_API Error setErrorHandler(ErrorHandler* handler);
ASMJIT_API Error setErrorHandler(ErrorHandler* handler) noexcept;
//! Clear the error handler.
ASMJIT_INLINE Error resetErrorHandler() { return setErrorHandler(nullptr); }
ASMJIT_INLINE Error resetErrorHandler() noexcept { return setErrorHandler(nullptr); }
//! Get the last error code.
ASMJIT_INLINE Error getLastError() const { return _lastError; }
ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; }
//! Set the last error code and propagate it through the error handler.
ASMJIT_API Error setLastError(Error error, const char* message = nullptr);
ASMJIT_API Error setLastError(Error error, const char* message = nullptr) noexcept;
//! Clear the last error code.
ASMJIT_INLINE void resetLastError() { _lastError = kErrorOk; }
ASMJIT_INLINE void resetLastError() noexcept { _lastError = kErrorOk; }
// --------------------------------------------------------------------------
// [External CodeGen]
// [Serializers]
// --------------------------------------------------------------------------
//! \internal
//!
//! Called after the code generator `cg` has been attached to the assembler.
ASMJIT_INLINE void _attached(CodeGen* cg) {
cg->_runtime = getRuntime();
cg->_assembler = this;
cg->_hlId = _nextExternalId();
_hlAttachedCount++;
ASMJIT_INLINE void _attached(ExternalTool* exTool) noexcept {
exTool->_runtime = getRuntime();
exTool->_assembler = this;
exTool->_exId = _nextExId();
_exCountAttached++;
}
//! \internal
//!
//! Called after the code generator `cg` has been detached from the assembler.
ASMJIT_INLINE void _detached(CodeGen* cg) {
cg->_runtime = nullptr;
cg->_assembler = nullptr;
cg->_hlId = 0;
_hlAttachedCount--;
ASMJIT_INLINE void _detached(ExternalTool* exTool) noexcept {
exTool->_runtime = nullptr;
exTool->_assembler = nullptr;
exTool->_exId = 0;
_exCountAttached--;
}
//! \internal
//!
//! Return a new code-gen ID (always greater than zero).
ASMJIT_INLINE uint64_t _nextExternalId() {
ASMJIT_ASSERT(_hlIdGenerator != ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF));
return ++_hlIdGenerator;
ASMJIT_INLINE uint64_t _nextExId() noexcept {
ASMJIT_ASSERT(_exIdGenerator != ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF));
return ++_exIdGenerator;
}
// --------------------------------------------------------------------------
// [Assembler Features]
// [Assembler Options]
// --------------------------------------------------------------------------
//! Get code-generator features.
ASMJIT_INLINE uint32_t getFeatures() const { return _features; }
//! Set code-generator features.
ASMJIT_INLINE void setFeatures(uint32_t features) { _features = features; }
//! Get code-generator `feature`.
ASMJIT_INLINE bool hasFeature(uint32_t feature) const {
ASMJIT_ASSERT(feature < 32);
return (_features & (1 << feature)) != 0;
//! Get global assembler options.
ASMJIT_INLINE uint32_t getAsmOptions() const noexcept {
return _asmOptions;
}
//! Set code-generator `feature` to `value`.
ASMJIT_INLINE void setFeature(uint32_t feature, bool value) {
ASMJIT_ASSERT(feature < 32);
feature = static_cast<uint32_t>(value) << feature;
_features = (_features & ~feature) | feature;
//! Get whether the global assembler `option` is turned on.
ASMJIT_INLINE bool hasAsmOption(uint32_t option) const noexcept {
return (_asmOptions & option) != 0;
}
//! Turn on global assembler `options`.
ASMJIT_INLINE void addAsmOptions(uint32_t options) noexcept {
_asmOptions |= options;
}
//! Turn off global assembler `options`.
ASMJIT_INLINE void clearAsmOptions(uint32_t options) noexcept {
_asmOptions &= ~options;
}
// --------------------------------------------------------------------------
@@ -585,12 +608,15 @@ struct ASMJIT_VIRTAPI Assembler {
// --------------------------------------------------------------------------
//! Get options of the next instruction.
ASMJIT_INLINE uint32_t getInstOptions() const { return _instOptions; }
ASMJIT_INLINE uint32_t getInstOptions() const noexcept {
return _instOptions;
}
//! Set options of the next instruction.
ASMJIT_INLINE void setInstOptions(uint32_t instOptions) { _instOptions = instOptions; }
ASMJIT_INLINE void setInstOptions(uint32_t instOptions) noexcept {
_instOptions = instOptions;
}
//! Get options of the next instruction and reset them.
ASMJIT_INLINE uint32_t getInstOptionsAndReset() {
ASMJIT_INLINE uint32_t getInstOptionsAndReset() noexcept {
uint32_t instOptions = _instOptions;
_instOptions = 0;
return instOptions;
@@ -605,17 +631,23 @@ struct ASMJIT_VIRTAPI Assembler {
//! The internal code-buffer will grow at least by `n` bytes so `n` bytes can
//! be added to it. If `n` is zero or `getOffset() + n` is not greater than
//! the current capacity of the code-buffer this function does nothing.
ASMJIT_API Error _grow(size_t n);
ASMJIT_API Error _grow(size_t n) noexcept;
//! Reserve the code-buffer to at least `n` bytes.
ASMJIT_API Error _reserve(size_t n);
ASMJIT_API Error _reserve(size_t n) noexcept;
//! Get capacity of the code-buffer.
ASMJIT_INLINE size_t getCapacity() const { return (size_t)(_end - _buffer); }
ASMJIT_INLINE size_t getCapacity() const noexcept {
return (size_t)(_end - _buffer);
}
//! Get the number of remaining bytes in code-buffer.
ASMJIT_INLINE size_t getRemainingSpace() const { return (size_t)(_end - _cursor); }
ASMJIT_INLINE size_t getRemainingSpace() const noexcept {
return (size_t)(_end - _cursor);
}
//! Get current offset in buffer, same as `getOffset() + getTramplineSize()`.
ASMJIT_INLINE size_t getCodeSize() const { return getOffset() + getTrampolinesSize(); }
ASMJIT_INLINE size_t getCodeSize() const noexcept {
return getOffset() + getTrampolinesSize();
}
//! Get size of all possible trampolines.
//!
@@ -623,25 +655,25 @@ struct ASMJIT_VIRTAPI Assembler {
//! addresses. This value is only non-zero if jmp of call instructions were
//! used with immediate operand (this means jumping or calling an absolute
//! address directly).
ASMJIT_INLINE size_t getTrampolinesSize() const { return _trampolinesSize; }
ASMJIT_INLINE size_t getTrampolinesSize() const noexcept { return _trampolinesSize; }
//! Get code-buffer.
ASMJIT_INLINE uint8_t* getBuffer() const { return _buffer; }
ASMJIT_INLINE uint8_t* getBuffer() const noexcept { return _buffer; }
//! Get the end of the code-buffer (points to the first byte that is invalid).
ASMJIT_INLINE uint8_t* getEnd() const { return _end; }
ASMJIT_INLINE uint8_t* getEnd() const noexcept { return _end; }
//! Get the current position in the code-buffer.
ASMJIT_INLINE uint8_t* getCursor() const { return _cursor; }
ASMJIT_INLINE uint8_t* getCursor() const noexcept { return _cursor; }
//! Set the current position in the buffer.
ASMJIT_INLINE void setCursor(uint8_t* cursor) {
ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept {
ASMJIT_ASSERT(cursor >= _buffer && cursor <= _end);
_cursor = cursor;
}
//! Get the current offset in the buffer.
ASMJIT_INLINE size_t getOffset() const { return (size_t)(_cursor - _buffer); }
ASMJIT_INLINE size_t getOffset() const noexcept { return (size_t)(_cursor - _buffer); }
//! Set the current offset in the buffer to `offset` and return the previous value.
ASMJIT_INLINE size_t setOffset(size_t offset) {
ASMJIT_INLINE size_t setOffset(size_t offset) noexcept {
ASMJIT_ASSERT(offset < getCapacity());
size_t oldOffset = (size_t)(_cursor - _buffer);
@@ -649,76 +681,100 @@ struct ASMJIT_VIRTAPI Assembler {
return oldOffset;
}
//! Get BYTE at position `pos`.
ASMJIT_INLINE uint8_t getByteAt(size_t pos) const {
//! Read `int8_t` at index `pos`.
ASMJIT_INLINE int32_t readI8At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer));
return *reinterpret_cast<const uint8_t*>(_buffer + pos);
return Utils::readI8(_buffer + pos);
}
//! Get WORD at position `pos`.
ASMJIT_INLINE uint16_t getWordAt(size_t pos) const {
ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer));
return *reinterpret_cast<const uint16_t*>(_buffer + pos);
}
//! Get DWORD at position `pos`.
ASMJIT_INLINE uint32_t getDWordAt(size_t pos) const {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
return *reinterpret_cast<const uint32_t*>(_buffer + pos);
}
//! Get QWORD at position `pos`.
ASMJIT_INLINE uint64_t getQWordAt(size_t pos) const {
ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer));
return *reinterpret_cast<const uint64_t*>(_buffer + pos);
}
//! Get int32_t at position `pos`.
ASMJIT_INLINE int32_t getInt32At(size_t pos) const {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
return *reinterpret_cast<const int32_t*>(_buffer + pos);
}
//! Get uint32_t at position `pos`.
ASMJIT_INLINE uint32_t getUInt32At(size_t pos) const {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
return *reinterpret_cast<const uint32_t*>(_buffer + pos);
}
//! Set BYTE at position `pos`.
ASMJIT_INLINE void setByteAt(size_t pos, uint8_t x) {
//! Read `uint8_t` at index `pos`.
ASMJIT_INLINE uint32_t readU8At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer));
*reinterpret_cast<uint8_t*>(_buffer + pos) = x;
return Utils::readU8(_buffer + pos);
}
//! Set WORD at position `pos`.
ASMJIT_INLINE void setWordAt(size_t pos, uint16_t x) {
//! Read `int16_t` at index `pos`.
ASMJIT_INLINE int32_t readI16At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer));
*reinterpret_cast<uint16_t*>(_buffer + pos) = x;
return Utils::readI16u(_buffer + pos);
}
//! Set DWORD at position `pos`.
ASMJIT_INLINE void setDWordAt(size_t pos, uint32_t x) {
//! Read `uint16_t` at index `pos`.
ASMJIT_INLINE uint32_t readU16At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer));
return Utils::readU16u(_buffer + pos);
}
//! Read `int32_t` at index `pos`.
ASMJIT_INLINE int32_t readI32At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
*reinterpret_cast<uint32_t*>(_buffer + pos) = x;
return Utils::readI32u(_buffer + pos);
}
//! Set QWORD at position `pos`.
ASMJIT_INLINE void setQWordAt(size_t pos, uint64_t x) {
//! Read `uint32_t` at index `pos`.
ASMJIT_INLINE uint32_t readU32At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
return Utils::readU32u(_buffer + pos);
}
//! Read `uint64_t` at index `pos`.
ASMJIT_INLINE int64_t readI64At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer));
*reinterpret_cast<uint64_t*>(_buffer + pos) = x;
return Utils::readI64u(_buffer + pos);
}
//! Set int32_t at position `pos`.
ASMJIT_INLINE void setInt32At(size_t pos, int32_t x) {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
*reinterpret_cast<int32_t*>(_buffer + pos) = x;
//! Read `uint64_t` at index `pos`.
ASMJIT_INLINE uint64_t readU64At(size_t pos) const noexcept {
ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer));
return Utils::readU64u(_buffer + pos);
}
//! Set uint32_t at position `pos`.
ASMJIT_INLINE void setUInt32At(size_t pos, uint32_t x) {
//! Write `int8_t` at index `pos`.
ASMJIT_INLINE void writeI8At(size_t pos, int32_t x) noexcept {
ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer));
Utils::writeI8(_buffer + pos, x);
}
//! Write `uint8_t` at index `pos`.
ASMJIT_INLINE void writeU8At(size_t pos, uint32_t x) noexcept {
ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer));
Utils::writeU8(_buffer + pos, x);
}
//! Write `int8_t` at index `pos`.
ASMJIT_INLINE void writeI16At(size_t pos, int32_t x) noexcept {
ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer));
Utils::writeI16u(_buffer + pos, x);
}
//! Write `uint8_t` at index `pos`.
ASMJIT_INLINE void writeU16At(size_t pos, uint32_t x) noexcept {
ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer));
Utils::writeU16u(_buffer + pos, x);
}
//! Write `int32_t` at index `pos`.
ASMJIT_INLINE void writeI32At(size_t pos, int32_t x) noexcept {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
*reinterpret_cast<uint32_t*>(_buffer + pos) = x;
Utils::writeI32u(_buffer + pos, x);
}
//! Write `uint32_t` at index `pos`.
ASMJIT_INLINE void writeU32At(size_t pos, uint32_t x) noexcept {
ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer));
Utils::writeU32u(_buffer + pos, x);
}
//! Write `int64_t` at index `pos`.
ASMJIT_INLINE void writeI64At(size_t pos, int64_t x) noexcept {
ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer));
Utils::writeI64u(_buffer + pos, x);
}
//! Write `uint64_t` at index `pos`.
ASMJIT_INLINE void writeU64At(size_t pos, uint64_t x) noexcept {
ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer));
Utils::writeU64u(_buffer + pos, x);
}
// --------------------------------------------------------------------------
@@ -726,7 +782,7 @@ struct ASMJIT_VIRTAPI Assembler {
// --------------------------------------------------------------------------
//! Embed raw data into the code-buffer.
ASMJIT_API virtual Error embed(const void* data, uint32_t size);
ASMJIT_API virtual Error embed(const void* data, uint32_t size) noexcept;
// --------------------------------------------------------------------------
// [Align]
@@ -736,24 +792,24 @@ struct ASMJIT_VIRTAPI Assembler {
//!
//! The sequence that is used to fill the gap between the aligned location
//! and the current depends on `alignMode`, see \ref AlignMode.
virtual Error align(uint32_t alignMode, uint32_t offset) = 0;
virtual Error align(uint32_t alignMode, uint32_t offset) noexcept = 0;
// --------------------------------------------------------------------------
// [Label]
// --------------------------------------------------------------------------
//! Get number of labels created.
ASMJIT_INLINE size_t getLabelsCount() const {
return _labelList.getLength();
ASMJIT_INLINE size_t getLabelsCount() const noexcept {
return _labels.getLength();
}
//! Get whether the `label` is valid (i.e. registered).
ASMJIT_INLINE bool isLabelValid(const Label& label) const {
ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
return isLabelValid(label.getId());
}
//! Get whether the label `id` is valid (i.e. registered).
ASMJIT_INLINE bool isLabelValid(uint32_t id) const {
return static_cast<size_t>(id) < _labelList.getLength();
ASMJIT_INLINE bool isLabelValid(uint32_t id) const noexcept {
return static_cast<size_t>(id) < _labels.getLength();
}
//! Get whether the `label` is bound.
@@ -762,53 +818,52 @@ struct ASMJIT_VIRTAPI Assembler {
//! of the label by using `isLabelValid()` method before the bound check if
//! you are not sure about its validity, otherwise you may hit an assertion
//! failure in debug mode, and undefined behavior in release mode.
ASMJIT_INLINE bool isLabelBound(const Label& label) const {
ASMJIT_INLINE bool isLabelBound(const Label& label) const noexcept {
return isLabelBound(label.getId());
}
//! \overload
ASMJIT_INLINE bool isLabelBound(uint32_t id) const {
ASMJIT_INLINE bool isLabelBound(uint32_t id) const noexcept {
ASMJIT_ASSERT(isLabelValid(id));
return _labelList[id]->offset != -1;
return _labels[id]->offset != -1;
}
//! Get a `label` offset or -1 if the label is not yet bound.
ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const {
ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const noexcept {
return getLabelOffset(label.getId());
}
//! \overload
ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const {
ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const noexcept {
ASMJIT_ASSERT(isLabelValid(id));
return _labelList[id]->offset;
return _labels[id]->offset;
}
//! Get `LabelData` by `label`.
ASMJIT_INLINE LabelData* getLabelData(const Label& label) const {
ASMJIT_INLINE LabelData* getLabelData(const Label& label) const noexcept {
return getLabelData(label.getId());
}
//! \overload
ASMJIT_INLINE LabelData* getLabelData(uint32_t id) const {
ASMJIT_INLINE LabelData* getLabelData(uint32_t id) const noexcept {
ASMJIT_ASSERT(isLabelValid(id));
return const_cast<LabelData*>(_labelList[id]);
return const_cast<LabelData*>(_labels[id]);
}
//! \internal
//!
//! Create a new label and return its ID.
ASMJIT_API uint32_t _newLabelId();
ASMJIT_API uint32_t _newLabelId() noexcept;
//! \internal
//!
//! New LabelLink instance.
ASMJIT_API LabelLink* _newLabelLink();
ASMJIT_API LabelLink* _newLabelLink() noexcept;
//! Create and return a new `Label`.
ASMJIT_INLINE Label newLabel() { return Label(_newLabelId()); }
ASMJIT_INLINE Label newLabel() noexcept { return Label(_newLabelId()); }
//! Bind the `label` to the current offset.
//!
//! \note Label can be bound only once!
ASMJIT_API virtual Error bind(const Label& label);
ASMJIT_API virtual Error bind(const Label& label) noexcept;
// --------------------------------------------------------------------------
// [Reloc]
@@ -831,18 +886,18 @@ struct ASMJIT_VIRTAPI Assembler {
//!
//! A given buffer will be overwritten, to get the number of bytes required,
//! use `getCodeSize()`.
ASMJIT_API size_t relocCode(void* dst, Ptr baseAddress = kNoBaseAddress) const;
ASMJIT_API size_t relocCode(void* dst, Ptr baseAddress = kNoBaseAddress) const noexcept;
//! \internal
//!
//! Reloc code.
virtual size_t _relocCode(void* dst, Ptr baseAddress) const = 0;
virtual size_t _relocCode(void* dst, Ptr baseAddress) const noexcept = 0;
// --------------------------------------------------------------------------
// [Make]
// --------------------------------------------------------------------------
ASMJIT_API virtual void* make();
ASMJIT_API virtual void* make() noexcept;
// --------------------------------------------------------------------------
// [Emit]
@@ -886,14 +941,8 @@ struct ASMJIT_VIRTAPI Assembler {
//! Associated runtime.
Runtime* _runtime;
#if !defined(ASMJIT_DISABLE_LOGGER)
//! Associated logger.
Logger* _logger;
#else
//! Makes libraries built with or without logging support binary compatible.
void* _logger;
#endif // ASMJIT_DISABLE_LOGGER
//! Associated error handler, triggered by \ref setLastError().
ErrorHandler* _errorHandler;
@@ -904,26 +953,26 @@ struct ASMJIT_VIRTAPI Assembler {
//! \internal
uint16_t _reserved;
//! Assembler features, used by \ref hasFeature() and \ref setFeature().
uint32_t _features;
//! Options affecting the next instruction.
//! Assembler options, used by \ref getAsmOptions() and \ref hasAsmOption().
uint32_t _asmOptions;
//! Instruction options, affect the next instruction that will be emitted.
uint32_t _instOptions;
//! Last error code.
uint32_t _lastError;
//! CodeGen ID generator.
uint64_t _hlIdGenerator;
//! Count of high-level code generators attached.
size_t _hlAttachedCount;
//! External tool ID generator.
uint64_t _exIdGenerator;
//! Count of external tools currently attached.
size_t _exCountAttached;
//! General purpose zone allocator.
Zone _zoneAllocator;
//! Start of the code-buffer.
//! Start of the code-buffer of the current section.
uint8_t* _buffer;
//! End of the code-buffer (points to the first invalid byte).
//! End of the code-buffer of the current section (points to the first invalid byte).
uint8_t* _end;
//! The current position in code `_buffer`.
//! The current position in `_buffer` of the current section.
uint8_t* _cursor;
//! Size of all possible trampolines.
@@ -934,23 +983,16 @@ struct ASMJIT_VIRTAPI Assembler {
//! Unused `LabelLink` structures pool.
LabelLink* _unusedLinks;
//! LabelData list.
PodVector<LabelData*> _labelList;
//! RelocData list.
PodVector<RelocData> _relocList;
//! Assembler sections.
PodVectorTmp<Section*, 4> _sections;
//! Assembler labels.
PodVectorTmp<LabelData*, 16> _labels;
//! Table of relocations.
PodVector<RelocData> _relocations;
};
//! \}
// ============================================================================
// [Defined-Later]
// ============================================================================
ASMJIT_INLINE Label::Label(Assembler& a) : Operand(NoInit) {
reset();
_label.id = a._newLabelId();
}
} // asmjit namespace
// [Api-End]

View File

@@ -38,7 +38,7 @@ enum { kCompilerDefaultLookAhead = 64 };
// [asmjit::Compiler - Construction / Destruction]
// ============================================================================
Compiler::Compiler() :
Compiler::Compiler() noexcept :
_features(0),
_maxLookAhead(kCompilerDefaultLookAhead),
_instOptions(0),
@@ -56,13 +56,13 @@ Compiler::Compiler() :
_constAllocator(4096 - Zone::kZoneOverhead),
_localConstPool(&_constAllocator),
_globalConstPool(&_zoneAllocator) {}
Compiler::~Compiler() {}
Compiler::~Compiler() noexcept {}
// ============================================================================
// [asmjit::Compiler - Attach / Reset]
// ============================================================================
void Compiler::reset(bool releaseMemory) {
void Compiler::reset(bool releaseMemory) noexcept {
Assembler* assembler = getAssembler();
if (assembler != nullptr)
assembler->_detached(this);
@@ -105,7 +105,7 @@ void Compiler::reset(bool releaseMemory) {
// [asmjit::Compiler - Node-Factory]
// ============================================================================
HLData* Compiler::newDataNode(const void* data, uint32_t size) {
HLData* Compiler::newDataNode(const void* data, uint32_t size) noexcept {
if (size > HLData::kInlineBufferSize) {
void* clonedData = _stringAllocator.alloc(size);
if (clonedData == nullptr)
@@ -119,11 +119,11 @@ HLData* Compiler::newDataNode(const void* data, uint32_t size) {
return newNode<HLData>(const_cast<void*>(data), size);
}
HLAlign* Compiler::newAlignNode(uint32_t alignMode, uint32_t offset) {
HLAlign* Compiler::newAlignNode(uint32_t alignMode, uint32_t offset) noexcept {
return newNode<HLAlign>(alignMode, offset);
}
HLLabel* Compiler::newLabelNode() {
HLLabel* Compiler::newLabelNode() noexcept {
Assembler* assembler = getAssembler();
if (assembler == nullptr) return nullptr;
@@ -134,16 +134,16 @@ HLLabel* Compiler::newLabelNode() {
if (node == nullptr) return nullptr;
// These have to be zero now.
ASMJIT_ASSERT(ld->hlId == 0);
ASMJIT_ASSERT(ld->hlData == nullptr);
ASMJIT_ASSERT(ld->exId == 0);
ASMJIT_ASSERT(ld->exData == nullptr);
ld->hlId = _hlId;
ld->hlData = node;
ld->exId = _exId;
ld->exData = node;
return node;
}
HLComment* Compiler::newCommentNode(const char* str) {
HLComment* Compiler::newCommentNode(const char* str) noexcept {
if (str != nullptr && str[0]) {
str = _stringAllocator.sdup(str);
if (str == nullptr)
@@ -153,7 +153,7 @@ HLComment* Compiler::newCommentNode(const char* str) {
return newNode<HLComment>(str);
}
HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) {
HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept {
if (var.getId() == kInvalidValue)
return nullptr;
@@ -165,7 +165,7 @@ HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) {
// [asmjit::Compiler - Code-Stream]
// ============================================================================
HLNode* Compiler::addNode(HLNode* node) {
HLNode* Compiler::addNode(HLNode* node) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(node->_prev == nullptr);
ASMJIT_ASSERT(node->_next == nullptr);
@@ -199,7 +199,7 @@ HLNode* Compiler::addNode(HLNode* node) {
return node;
}
HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) {
HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(node->_prev == nullptr);
ASMJIT_ASSERT(node->_next == nullptr);
@@ -220,7 +220,7 @@ HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) {
return node;
}
HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) {
HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) noexcept {
ASMJIT_ASSERT(node != nullptr);
ASMJIT_ASSERT(node->_prev == nullptr);
ASMJIT_ASSERT(node->_next == nullptr);
@@ -241,7 +241,7 @@ HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) {
return node;
}
static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) {
static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) noexcept {
if (node_->isJmpOrJcc()) {
HLJump* node = static_cast<HLJump*>(node_);
HLLabel* label = node->getTarget();
@@ -269,7 +269,7 @@ static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) {
}
}
HLNode* Compiler::removeNode(HLNode* node) {
HLNode* Compiler::removeNode(HLNode* node) noexcept {
HLNode* prev = node->_prev;
HLNode* next = node->_next;
@@ -293,7 +293,7 @@ HLNode* Compiler::removeNode(HLNode* node) {
return node;
}
void Compiler::removeNodes(HLNode* first, HLNode* last) {
void Compiler::removeNodes(HLNode* first, HLNode* last) noexcept {
if (first == last) {
removeNode(first);
return;
@@ -330,7 +330,7 @@ void Compiler::removeNodes(HLNode* first, HLNode* last) {
}
}
HLNode* Compiler::setCursor(HLNode* node) {
HLNode* Compiler::setCursor(HLNode* node) noexcept {
HLNode* old = _cursor;
_cursor = node;
return old;
@@ -340,7 +340,7 @@ HLNode* Compiler::setCursor(HLNode* node) {
// [asmjit::Compiler - Align]
// ============================================================================
Error Compiler::align(uint32_t alignMode, uint32_t offset) {
Error Compiler::align(uint32_t alignMode, uint32_t offset) noexcept {
HLAlign* node = newAlignNode(alignMode, offset);
if (node == nullptr)
return setLastError(kErrorNoHeapMemory);
@@ -353,25 +353,25 @@ Error Compiler::align(uint32_t alignMode, uint32_t offset) {
// [asmjit::Compiler - Label]
// ============================================================================
HLLabel* Compiler::getHLLabel(uint32_t id) const {
HLLabel* Compiler::getHLLabel(uint32_t id) const noexcept {
Assembler* assembler = getAssembler();
if (assembler == nullptr) return nullptr;
LabelData* ld = assembler->getLabelData(id);
if (ld->hlId == _hlId)
return static_cast<HLLabel*>(ld->hlData);
if (ld->exId == _exId)
return static_cast<HLLabel*>(ld->exData);
else
return nullptr;
}
bool Compiler::isLabelValid(uint32_t id) const {
bool Compiler::isLabelValid(uint32_t id) const noexcept {
Assembler* assembler = getAssembler();
if (assembler == nullptr) return false;
return static_cast<size_t>(id) < assembler->getLabelsCount();
}
uint32_t Compiler::_newLabelId() {
uint32_t Compiler::_newLabelId() noexcept {
HLLabel* node = newLabelNode();
if (node == nullptr) {
setLastError(kErrorNoHeapMemory);
@@ -381,7 +381,7 @@ uint32_t Compiler::_newLabelId() {
return node->getLabelId();
}
Error Compiler::bind(const Label& label) {
Error Compiler::bind(const Label& label) noexcept {
HLLabel* node = getHLLabel(label);
if (node == nullptr)
return setLastError(kErrorInvalidState);
@@ -393,7 +393,7 @@ Error Compiler::bind(const Label& label) {
// [asmjit::Compiler - Embed]
// ============================================================================
Error Compiler::embed(const void* data, uint32_t size) {
Error Compiler::embed(const void* data, uint32_t size) noexcept {
HLData* node = newDataNode(data, size);
if (node == nullptr)
return setLastError(kErrorNoHeapMemory);
@@ -402,7 +402,7 @@ Error Compiler::embed(const void* data, uint32_t size) {
return kErrorOk;
}
Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) {
Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) noexcept {
if (label.getId() == kInvalidValue)
return kErrorInvalidState;
@@ -423,7 +423,7 @@ Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) {
// [asmjit::Compiler - Comment]
// ============================================================================
Error Compiler::comment(const char* fmt, ...) {
Error Compiler::comment(const char* fmt, ...) noexcept {
char buf[256];
char* p = buf;
@@ -448,7 +448,7 @@ Error Compiler::comment(const char* fmt, ...) {
// [asmjit::Compiler - Hint]
// ============================================================================
Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) {
Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
@@ -464,9 +464,9 @@ Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) {
// [asmjit::Compiler - Vars]
// ============================================================================
VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* name) {
VarData* Compiler::_newVd(const VarInfo& vi, const char* name) noexcept {
VarData* vd = reinterpret_cast<VarData*>(_varAllocator.alloc(sizeof(VarData)));
if (vd == nullptr)
if (ASMJIT_UNLIKELY(vd == nullptr))
goto _NoMemory;
vd->_name = noName;
@@ -479,8 +479,8 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char*
}
#endif // !ASMJIT_DISABLE_LOGGER
vd->_type = static_cast<uint8_t>(type);
vd->_class = static_cast<uint8_t>(c);
vd->_type = static_cast<uint8_t>(vi.getTypeId());
vd->_class = static_cast<uint8_t>(vi.getRegClass());
vd->_flags = 0;
vd->_priority = 10;
@@ -492,9 +492,9 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char*
vd->_saveOnUnuse = false;
vd->_modified = false;
vd->_reserved0 = 0;
vd->_alignment = static_cast<uint8_t>(Utils::iMin<uint32_t>(size, 64));
vd->_alignment = static_cast<uint8_t>(Utils::iMin<uint32_t>(vi.getSize(), 64));
vd->_size = size;
vd->_size = vi.getSize();
vd->_homeMask = 0;
vd->_memOffset = 0;
@@ -507,7 +507,7 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char*
vd->_va = nullptr;
if (_varList.append(vd) != kErrorOk)
if (ASMJIT_UNLIKELY(_varList.append(vd) != kErrorOk))
goto _NoMemory;
return vd;
@@ -516,43 +516,43 @@ _NoMemory:
return nullptr;
}
Error Compiler::alloc(Var& var) {
Error Compiler::alloc(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintAlloc, kInvalidValue);
}
Error Compiler::alloc(Var& var, uint32_t regIndex) {
Error Compiler::alloc(Var& var, uint32_t regIndex) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintAlloc, regIndex);
}
Error Compiler::alloc(Var& var, const Reg& reg) {
Error Compiler::alloc(Var& var, const Reg& reg) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintAlloc, reg.getRegIndex());
}
Error Compiler::save(Var& var) {
Error Compiler::save(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintSave, kInvalidValue);
}
Error Compiler::spill(Var& var) {
Error Compiler::spill(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintSpill, kInvalidValue);
}
Error Compiler::unuse(Var& var) {
Error Compiler::unuse(Var& var) noexcept {
if (var.getId() == kInvalidValue)
return kErrorOk;
return _hint(var, kVarHintUnuse, kInvalidValue);
}
uint32_t Compiler::getPriority(Var& var) const {
uint32_t Compiler::getPriority(Var& var) const noexcept {
if (var.getId() == kInvalidValue)
return kInvalidValue;
@@ -560,7 +560,7 @@ uint32_t Compiler::getPriority(Var& var) const {
return vd->getPriority();
}
void Compiler::setPriority(Var& var, uint32_t priority) {
void Compiler::setPriority(Var& var, uint32_t priority) noexcept {
if (var.getId() == kInvalidValue)
return;
@@ -571,7 +571,7 @@ void Compiler::setPriority(Var& var, uint32_t priority) {
vd->_priority = static_cast<uint8_t>(priority);
}
bool Compiler::getSaveOnUnuse(Var& var) const {
bool Compiler::getSaveOnUnuse(Var& var) const noexcept {
if (var.getId() == kInvalidValue)
return false;
@@ -579,7 +579,7 @@ bool Compiler::getSaveOnUnuse(Var& var) const {
return static_cast<bool>(vd->_saveOnUnuse);
}
void Compiler::setSaveOnUnuse(Var& var, bool value) {
void Compiler::setSaveOnUnuse(Var& var, bool value) noexcept {
if (var.getId() == kInvalidValue)
return;
@@ -587,7 +587,7 @@ void Compiler::setSaveOnUnuse(Var& var, bool value) {
vd->_saveOnUnuse = value;
}
void Compiler::rename(Var& var, const char* fmt, ...) {
void Compiler::rename(Var& var, const char* fmt, ...) noexcept {
if (var.getId() == kInvalidValue)
return;

View File

@@ -18,6 +18,7 @@
#include "../base/containers.h"
#include "../base/hlstream.h"
#include "../base/operand.h"
#include "../base/podvector.h"
#include "../base/utils.h"
#include "../base/zone.h"
@@ -48,8 +49,8 @@ ASMJIT_ENUM(CompilerFeatures) {
//! Default `false` - has to be explicitly enabled as the scheduler needs
//! some time to run.
//!
//! X86/X64
//! -------
//! X86/X64 Specific
//! ----------------
//!
//! If scheduling is enabled AsmJit will try to reorder instructions to
//! minimize the dependency chain. Scheduler always runs after the registers
@@ -71,6 +72,65 @@ ASMJIT_ENUM(ConstScope) {
kConstScopeGlobal = 1
};
// ============================================================================
// [asmjit::VarInfo]
// ============================================================================
struct VarInfo {
// ============================================================================
// [Flags]
// ============================================================================
//! \internal
//!
//! Variable flags.
ASMJIT_ENUM(Flags) {
//! Variable contains one or more single-precision floating point.
kFlagSP = 0x10,
//! Variable contains one or more double-precision floating point.
kFlagDP = 0x20,
//! Variable is a vector, contains packed data.
kFlagSIMD = 0x80
};
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get type id.
ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; }
//! Get type name.
ASMJIT_INLINE const char* getTypeName() const noexcept { return _typeName; }
//! Get register size in bytes.
ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; }
//! Get variable class, see \ref RegClass.
ASMJIT_INLINE uint32_t getRegClass() const noexcept { return _regClass; }
//! Get register type, see `X86RegType`.
ASMJIT_INLINE uint32_t getRegType() const noexcept { return _regType; }
//! Get type flags, see `VarFlag`.
ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Variable type id.
uint8_t _typeId;
//! Variable and register size (in bytes).
uint8_t _size;
//! Register class, see `RegClass`.
uint8_t _regClass;
//! Register type the variable is mapped to.
uint8_t _regType;
//! Variable info flags, see \ref Flags.
uint32_t _flags;
//! Variable type name.
char _typeName[8];
};
// ============================================================================
// [asmjit::Compiler]
// ============================================================================
@@ -78,7 +138,7 @@ ASMJIT_ENUM(ConstScope) {
//! Compiler interface.
//!
//! \sa Assembler.
struct ASMJIT_VIRTAPI Compiler : public CodeGen {
struct ASMJIT_VIRTAPI Compiler : public ExternalTool {
ASMJIT_NO_COPY(Compiler)
// --------------------------------------------------------------------------
@@ -86,43 +146,51 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
// --------------------------------------------------------------------------
//! Create a new `Compiler` instance.
ASMJIT_API Compiler();
ASMJIT_API Compiler() noexcept;
//! Destroy the `Compiler` instance.
ASMJIT_API virtual ~Compiler();
ASMJIT_API virtual ~Compiler() noexcept;
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! \override
ASMJIT_API virtual void reset(bool releaseMemory);
ASMJIT_API virtual void reset(bool releaseMemory) noexcept;
// --------------------------------------------------------------------------
// [Compiler Features]
// --------------------------------------------------------------------------
//! Get code-generator features.
ASMJIT_INLINE uint32_t getFeatures() const { return _features; }
ASMJIT_INLINE uint32_t getFeatures() const noexcept {
return _features;
}
//! Set code-generator features.
ASMJIT_INLINE void setFeatures(uint32_t features) { _features = features; }
ASMJIT_INLINE void setFeatures(uint32_t features) noexcept {
_features = features;
}
//! Get code-generator `feature`.
ASMJIT_INLINE bool hasFeature(uint32_t feature) const {
ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept {
ASMJIT_ASSERT(feature < 32);
return (_features & (1 << feature)) != 0;
}
//! Set code-generator `feature` to `value`.
ASMJIT_INLINE void setFeature(uint32_t feature, bool value) {
ASMJIT_INLINE void setFeature(uint32_t feature, bool value) noexcept {
ASMJIT_ASSERT(feature < 32);
feature = static_cast<uint32_t>(value) << feature;
_features = (_features & ~feature) | feature;
}
//! Get maximum look ahead.
ASMJIT_INLINE uint32_t getMaxLookAhead() const { return _maxLookAhead; }
ASMJIT_INLINE uint32_t getMaxLookAhead() const noexcept {
return _maxLookAhead;
}
//! Set maximum look ahead to `val`.
ASMJIT_INLINE void setMaxLookAhead(uint32_t val) { _maxLookAhead = val; }
ASMJIT_INLINE void setMaxLookAhead(uint32_t val) noexcept {
_maxLookAhead = val;
}
// --------------------------------------------------------------------------
// [Token ID]
@@ -131,21 +199,29 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
//! \internal
//!
//! Reset the token-id generator.
ASMJIT_INLINE void _resetTokenGenerator() { _tokenGenerator = 0; }
ASMJIT_INLINE void _resetTokenGenerator() noexcept {
_tokenGenerator = 0;
}
//! \internal
//!
//! Generate a new unique token id.
ASMJIT_INLINE uint32_t _generateUniqueToken() { return ++_tokenGenerator; }
ASMJIT_INLINE uint32_t _generateUniqueToken() noexcept {
return ++_tokenGenerator;
}
// --------------------------------------------------------------------------
// [Instruction Options]
// --------------------------------------------------------------------------
//! Get options of the next instruction.
ASMJIT_INLINE uint32_t getInstOptions() const { return _instOptions; }
ASMJIT_INLINE uint32_t getInstOptions() const noexcept {
return _instOptions;
}
//! Set options of the next instruction.
ASMJIT_INLINE void setInstOptions(uint32_t instOptions) { _instOptions = instOptions; }
ASMJIT_INLINE void setInstOptions(uint32_t instOptions) noexcept {
_instOptions = instOptions;
}
//! Get options of the next instruction and reset them.
ASMJIT_INLINE uint32_t getInstOptionsAndReset() {
@@ -160,28 +236,28 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
//! \internal
template<typename T>
ASMJIT_INLINE T* newNode() {
ASMJIT_INLINE T* newNode() noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this);
}
//! \internal
template<typename T, typename P0>
ASMJIT_INLINE T* newNode(P0 p0) {
ASMJIT_INLINE T* newNode(P0 p0) noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this, p0);
}
//! \internal
template<typename T, typename P0, typename P1>
ASMJIT_INLINE T* newNode(P0 p0, P1 p1) {
ASMJIT_INLINE T* newNode(P0 p0, P1 p1) noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this, p0, p1);
}
//! \internal
template<typename T, typename P0, typename P1, typename P2>
ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) {
ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) noexcept {
void* p = _zoneAllocator.alloc(sizeof(T));
return new(p) T(this, p0, p1, p2);
}
@@ -189,66 +265,66 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
//! \internal
//!
//! Create a new `HLData` node.
ASMJIT_API HLData* newDataNode(const void* data, uint32_t size);
ASMJIT_API HLData* newDataNode(const void* data, uint32_t size) noexcept;
//! \internal
//!
//! Create a new `HLAlign` node.
ASMJIT_API HLAlign* newAlignNode(uint32_t alignMode, uint32_t offset);
ASMJIT_API HLAlign* newAlignNode(uint32_t alignMode, uint32_t offset) noexcept;
//! \internal
//!
//! Create a new `HLLabel` node.
ASMJIT_API HLLabel* newLabelNode();
ASMJIT_API HLLabel* newLabelNode() noexcept;
//! \internal
//!
//! Create a new `HLComment`.
ASMJIT_API HLComment* newCommentNode(const char* str);
ASMJIT_API HLComment* newCommentNode(const char* str) noexcept;
//! \internal
//!
//! Create a new `HLHint`.
ASMJIT_API HLHint* newHintNode(Var& var, uint32_t hint, uint32_t value);
ASMJIT_API HLHint* newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept;
// --------------------------------------------------------------------------
// [Code-Stream]
// --------------------------------------------------------------------------
//! Add node `node` after current and set current to `node`.
ASMJIT_API HLNode* addNode(HLNode* node);
ASMJIT_API HLNode* addNode(HLNode* node) noexcept;
//! Insert `node` before `ref`.
ASMJIT_API HLNode* addNodeBefore(HLNode* node, HLNode* ref);
ASMJIT_API HLNode* addNodeBefore(HLNode* node, HLNode* ref) noexcept;
//! Insert `node` after `ref`.
ASMJIT_API HLNode* addNodeAfter(HLNode* node, HLNode* ref);
ASMJIT_API HLNode* addNodeAfter(HLNode* node, HLNode* ref) noexcept;
//! Remove `node`.
ASMJIT_API HLNode* removeNode(HLNode* node);
ASMJIT_API HLNode* removeNode(HLNode* node) noexcept;
//! Remove multiple nodes.
ASMJIT_API void removeNodes(HLNode* first, HLNode* last);
ASMJIT_API void removeNodes(HLNode* first, HLNode* last) noexcept;
//! Get the first node.
ASMJIT_INLINE HLNode* getFirstNode() const { return _firstNode; }
ASMJIT_INLINE HLNode* getFirstNode() const noexcept { return _firstNode; }
//! Get the last node.
ASMJIT_INLINE HLNode* getLastNode() const { return _lastNode; }
ASMJIT_INLINE HLNode* getLastNode() const noexcept { return _lastNode; }
//! Get current node.
//!
//! \note If this method returns `nullptr` it means that nothing has been
//! emitted yet.
ASMJIT_INLINE HLNode* getCursor() const { return _cursor; }
ASMJIT_INLINE HLNode* getCursor() const noexcept { return _cursor; }
//! \internal
//!
//! Set the current node without returning the previous node.
ASMJIT_INLINE void _setCursor(HLNode* node) { _cursor = node; }
ASMJIT_INLINE void _setCursor(HLNode* node) noexcept { _cursor = node; }
//! Set the current node to `node` and return the previous one.
ASMJIT_API HLNode* setCursor(HLNode* node);
ASMJIT_API HLNode* setCursor(HLNode* node) noexcept;
// --------------------------------------------------------------------------
// [Func]
// --------------------------------------------------------------------------
//! Get current function.
ASMJIT_INLINE HLFunc* getFunc() const { return _func; }
ASMJIT_INLINE HLFunc* getFunc() const noexcept { return _func; }
// --------------------------------------------------------------------------
// [Align]
@@ -258,7 +334,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
//!
//! The sequence that is used to fill the gap between the aligned location
//! and the current position depends on `alignMode`, see \ref AlignMode.
ASMJIT_API Error align(uint32_t alignMode, uint32_t offset);
ASMJIT_API Error align(uint32_t alignMode, uint32_t offset) noexcept;
// --------------------------------------------------------------------------
// [Label]
@@ -267,126 +343,127 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
//! Get `HLLabel` by `id`.
//!
//! NOTE: The label has to be valid, see `isLabelValid()`.
ASMJIT_API HLLabel* getHLLabel(uint32_t id) const;
ASMJIT_API HLLabel* getHLLabel(uint32_t id) const noexcept;
//! Get `HLLabel` by `label`.
//!
//! NOTE: The label has to be valid, see `isLabelValid()`.
ASMJIT_INLINE HLLabel* getHLLabel(const Label& label) { return getHLLabel(label.getId()); }
ASMJIT_INLINE HLLabel* getHLLabel(const Label& label) noexcept {
return getHLLabel(label.getId());
}
//! Get whether the label `id` is valid.
ASMJIT_API bool isLabelValid(uint32_t id) const;
ASMJIT_API bool isLabelValid(uint32_t id) const noexcept;
//! Get whether the `label` is valid.
ASMJIT_INLINE bool isLabelValid(const Label& label) const { return isLabelValid(label.getId()); }
ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept {
return isLabelValid(label.getId());
}
//! \internal
//!
//! Create a new label and return its ID.
ASMJIT_API uint32_t _newLabelId();
ASMJIT_API uint32_t _newLabelId() noexcept;
//! Create and return a new `Label`.
ASMJIT_INLINE Label newLabel() { return Label(_newLabelId()); }
ASMJIT_INLINE Label newLabel() noexcept { return Label(_newLabelId()); }
//! Bind label to the current offset.
//!
//! \note Label can be bound only once!
ASMJIT_API Error bind(const Label& label);
ASMJIT_API Error bind(const Label& label) noexcept;
// --------------------------------------------------------------------------
// [Embed]
// --------------------------------------------------------------------------
//! Embed data.
ASMJIT_API Error embed(const void* data, uint32_t size);
ASMJIT_API Error embed(const void* data, uint32_t size) noexcept;
//! Embed a constant pool data, adding the following in order:
//! 1. Data alignment.
//! 2. Label.
//! 3. Constant pool data.
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool);
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) noexcept;
// --------------------------------------------------------------------------
// [Comment]
// --------------------------------------------------------------------------
//! Emit a single comment line.
ASMJIT_API Error comment(const char* fmt, ...);
ASMJIT_API Error comment(const char* fmt, ...) noexcept;
// --------------------------------------------------------------------------
// [Hint]
// --------------------------------------------------------------------------
//! Emit a new hint (purely informational node).
ASMJIT_API Error _hint(Var& var, uint32_t hint, uint32_t value);
ASMJIT_API Error _hint(Var& var, uint32_t hint, uint32_t value) noexcept;
// --------------------------------------------------------------------------
// [Vars]
// --------------------------------------------------------------------------
//! Get whether variable `var` is created.
ASMJIT_INLINE bool isVarValid(const Var& var) const {
return static_cast<size_t>(var.getId() & kOperandIdNum) < _varList.getLength();
ASMJIT_INLINE bool isVarValid(const Var& var) const noexcept {
return static_cast<size_t>(var.getId() & Operand::kIdIndexMask) < _varList.getLength();
}
//! \internal
//!
//! Get `VarData` by `var`.
ASMJIT_INLINE VarData* getVd(const Var& var) const {
ASMJIT_INLINE VarData* getVd(const Var& var) const noexcept {
return getVdById(var.getId());
}
//! \internal
//!
//! Get `VarData` by `id`.
ASMJIT_INLINE VarData* getVdById(uint32_t id) const {
ASMJIT_INLINE VarData* getVdById(uint32_t id) const noexcept {
ASMJIT_ASSERT(id != kInvalidValue);
ASMJIT_ASSERT(static_cast<size_t>(id & kOperandIdNum) < _varList.getLength());
ASMJIT_ASSERT(static_cast<size_t>(id & Operand::kIdIndexMask) < _varList.getLength());
return _varList[id & kOperandIdNum];
return _varList[id & Operand::kIdIndexMask];
}
//! \internal
//!
//! Get an array of 'VarData*'.
ASMJIT_INLINE VarData** _getVdArray() const {
ASMJIT_INLINE VarData** _getVdArray() const noexcept {
return const_cast<VarData**>(_varList.getData());
}
//! \internal
//!
//! Create a new `VarData`.
ASMJIT_API VarData* _newVd(uint32_t type, uint32_t size, uint32_t c, const char* name);
//! Create a new `Var`.
virtual Error _newVar(Var* var, uint32_t vType, const char* name, va_list ap) = 0;
ASMJIT_API VarData* _newVd(const VarInfo& vi, const char* name) noexcept;
//! Alloc variable `var`.
ASMJIT_API Error alloc(Var& var);
ASMJIT_API Error alloc(Var& var) noexcept;
//! Alloc variable `var` using `regIndex` as a register index.
ASMJIT_API Error alloc(Var& var, uint32_t regIndex);
ASMJIT_API Error alloc(Var& var, uint32_t regIndex) noexcept;
//! Alloc variable `var` using `reg` as a register operand.
ASMJIT_API Error alloc(Var& var, const Reg& reg);
ASMJIT_API Error alloc(Var& var, const Reg& reg) noexcept;
//! Spill variable `var`.
ASMJIT_API Error spill(Var& var);
ASMJIT_API Error spill(Var& var) noexcept;
//! Save variable `var` if the status is `modified` at this point.
ASMJIT_API Error save(Var& var);
ASMJIT_API Error save(Var& var) noexcept;
//! Unuse variable `var`.
ASMJIT_API Error unuse(Var& var);
ASMJIT_API Error unuse(Var& var) noexcept;
//! Get priority of variable `var`.
ASMJIT_API uint32_t getPriority(Var& var) const;
ASMJIT_API uint32_t getPriority(Var& var) const noexcept;
//! Set priority of variable `var` to `priority`.
ASMJIT_API void setPriority(Var& var, uint32_t priority);
ASMJIT_API void setPriority(Var& var, uint32_t priority) noexcept;
//! Get save-on-unuse `var` property.
ASMJIT_API bool getSaveOnUnuse(Var& var) const;
ASMJIT_API bool getSaveOnUnuse(Var& var) const noexcept;
//! Set save-on-unuse `var` property to `value`.
ASMJIT_API void setSaveOnUnuse(Var& var, bool value);
ASMJIT_API void setSaveOnUnuse(Var& var, bool value) noexcept;
//! Rename variable `var` to `name`.
//!
//! \note Only new name will appear in the logger.
ASMJIT_API void rename(Var& var, const char* fmt, ...);
ASMJIT_API void rename(Var& var, const char* fmt, ...) noexcept;
// --------------------------------------------------------------------------
// [Stack]
@@ -395,7 +472,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
//! \internal
//!
//! Create a new memory chunk allocated on the current function's stack.
virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) = 0;
virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept = 0;
// --------------------------------------------------------------------------
// [Const]
@@ -404,7 +481,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
//! \internal
//!
//! Put data to a constant-pool and get a memory reference to it.
virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0;
virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept = 0;
// --------------------------------------------------------------------------
// [Members]
@@ -471,12 +548,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen {
// [Defined-Later]
// ============================================================================
ASMJIT_INLINE Label::Label(Compiler& c) : Operand(NoInit) {
reset();
_label.id = c._newLabelId();
}
ASMJIT_INLINE HLNode::HLNode(Compiler* compiler, uint32_t type) {
ASMJIT_INLINE HLNode::HLNode(Compiler* compiler, uint32_t type) noexcept {
_prev = nullptr;
_next = nullptr;
_type = static_cast<uint8_t>(type);

View File

@@ -298,7 +298,7 @@ Error Context::removeUnreachableCode() {
node = first;
do {
HLNode* next = node->getNext();
if (!node->isInformative() && node->getType() != kHLNodeTypeAlign) {
if (!node->isInformative() && node->getType() != HLNode::kTypeAlign) {
ASMJIT_TLOG("[%05d] Unreachable\n", node->getFlowId());
compiler->removeNode(node);
}
@@ -394,7 +394,7 @@ _OnVisit:
}
}
if (node->getType() == kHLNodeTypeLabel)
if (node->getType() == HLNode::kTypeLabel)
goto _OnTarget;
if (node == func)
@@ -413,7 +413,7 @@ _OnPatch:
if (!bNode->_addBitsDelSource(bCur, bLen))
goto _OnDone;
if (node->getType() == kHLNodeTypeLabel)
if (node->getType() == HLNode::kTypeLabel)
goto _OnTarget;
if (node == func)
@@ -574,15 +574,6 @@ Error Context::formatInlineComment(StringBuilder& dst, HLNode* node) {
return kErrorOk;
}
// ============================================================================
// [asmjit::Context - Schedule]
// ============================================================================
Error Context::schedule() {
// By default there is no instruction scheduler implemented.
return kErrorOk;
}
// ============================================================================
// [asmjit::Context - Cleanup]
// ============================================================================
@@ -626,9 +617,6 @@ Error Context::compile(HLFunc* func) {
ASMJIT_PROPAGATE_ERROR(translate());
if (compiler->hasFeature(kCompilerFeatureEnableScheduler))
ASMJIT_PROPAGATE_ERROR(schedule());
// We alter the compiler cursor, because it doesn't make sense to reference
// it after compilation - some nodes may disappear and it's forbidden to add
// new code after the compilation is done.

View File

@@ -13,6 +13,7 @@
// [Dependencies - AsmJit]
#include "../base/compiler.h"
#include "../base/podvector.h"
#include "../base/zone.h"
// [Api-Begin]
@@ -23,22 +24,6 @@ namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::VarFlags]
// ============================================================================
//! \internal
//!
//! X86/X64 variable flags.
ASMJIT_ENUM(VarFlags) {
//! Variable contains single-precision floating-point(s).
kVarFlagSp = 0x10,
//! Variable contains double-precision floating-point(s).
kVarFlagDp = 0x20,
//! Variable is packed, i.e. packed floats, doubles, ...
kVarFlagPacked = 0x40
};
// ============================================================================
// [asmjit::VarAttrFlags]
// ============================================================================
@@ -801,12 +786,6 @@ struct Context {
//! Translate code by allocating registers and handling state changes.
virtual Error translate() = 0;
// --------------------------------------------------------------------------
// [Schedule]
// --------------------------------------------------------------------------
virtual Error schedule();
// --------------------------------------------------------------------------
// [Cleanup]
// --------------------------------------------------------------------------

View File

@@ -59,6 +59,15 @@ namespace asmjit {
//! List of calling conventions for 64-bit x86 mode (x64):
//! - `kCallConvX64Win` - Windows 64-bit calling convention (WIN64 ABI).
//! - `kCallConvX64Unix` - Unix 64-bit calling convention (AMD64 ABI).
//!
//! ARM Specific Conventions
//! ------------------------
//!
//! List of ARM calling conventions:
//! - `kCallConvArm32SoftFP` - Legacy calling convention, floating point
//! arguments are passed via GP registers.
//! - `kCallConvArm32HardFP` - Modern calling convention, uses VFP registers
//! to pass floating point arguments.
ASMJIT_ENUM(CallConv) {
//! Calling convention is invalid (can't be used).
kCallConvNone = 0,
@@ -289,6 +298,13 @@ ASMJIT_ENUM(CallConv) {
//! Stack is always aligned to 16 bytes.
kCallConvX64Unix = 11,
// --------------------------------------------------------------------------
// [ARM]
// --------------------------------------------------------------------------
kCallConvArm32SoftFP = 16,
kCallConvArm32HardFP = 17,
// --------------------------------------------------------------------------
// [Internal]
// --------------------------------------------------------------------------
@@ -303,6 +319,11 @@ ASMJIT_ENUM(CallConv) {
//! \internal
_kCallConvX64End = 11,
//! \internal
_kCallConvArmStart = 16,
//! \internal
_kCallConvArmEnd = 17,
// --------------------------------------------------------------------------
// [Host]
// --------------------------------------------------------------------------
@@ -347,6 +368,16 @@ ASMJIT_ENUM(CallConv) {
kCallConvHostCDecl = kCallConvHost,
kCallConvHostStdCall = kCallConvHost,
kCallConvHostFastCall = kCallConvHost
#elif ASMJIT_ARCH_ARM32
# if defined(__SOFTFP__)
kCallConvHost = kCallConvArm32SoftFP,
# else
kCallConvHost = kCallConvArm32HardFP,
# endif
// These don't exist on ARM.
kCallConvHostCDecl = kCallConvHost,
kCallConvHostStdCall = kCallConvHost,
kCallConvHostFastCall = kCallConvHost
#else
# error "[asmjit] Couldn't determine the target's calling convention."
#endif
@@ -484,6 +515,7 @@ ASMJIT_ENUM(FuncArgIndex) {
//! This value is typically omitted and added only if there is HI argument
//! accessed.
kFuncArgLo = 0,
//! Index to the HI part of function argument.
//!
//! HI part of function argument depends on target architecture. On x86 it's

View File

@@ -27,7 +27,7 @@ namespace asmjit {
//! \internal
//!
//! Remove left horizontal links.
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) {
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) noexcept {
ConstPool::Node* link = node->_link[0];
uint32_t level = node->_level;
@@ -44,7 +44,7 @@ static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* no
//! \internal
//!
//! Remove consecutive horizontal links.
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) {
static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) noexcept {
ConstPool::Node* link = node->_link[1];
uint32_t level = node->_level;
@@ -59,7 +59,7 @@ static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* n
return node;
}
ConstPool::Node* ConstPool::Tree::get(const void* data) {
ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept {
ConstPool::Node* node = _root;
size_t dataSize = _dataSize;
@@ -73,7 +73,7 @@ ConstPool::Node* ConstPool::Tree::get(const void* data) {
return nullptr;
}
void ConstPool::Tree::put(ConstPool::Node* newNode) {
void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept {
size_t dataSize = _dataSize;
_length++;
@@ -126,7 +126,7 @@ void ConstPool::Tree::put(ConstPool::Node* newNode) {
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
ConstPool::ConstPool(Zone* zone) {
ConstPool::ConstPool(Zone* zone) noexcept {
_zone = zone;
size_t dataSize = 1;
@@ -141,13 +141,13 @@ ConstPool::ConstPool(Zone* zone) {
_alignment = 0;
}
ConstPool::~ConstPool() {}
ConstPool::~ConstPool() noexcept {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
void ConstPool::reset() {
void ConstPool::reset() noexcept {
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_gaps[i] = nullptr;
@@ -162,7 +162,7 @@ void ConstPool::reset() {
// [asmjit::ConstPool - Ops]
// ============================================================================
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) {
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (gap == nullptr)
return self->_zone->allocT<ConstPool::Gap>();
@@ -171,12 +171,12 @@ static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) {
return gap;
}
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) {
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) {
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noexcept {
ASMJIT_ASSERT(length > 0);
while (length > 0) {
@@ -222,7 +222,7 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) {
}
}
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) {
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex;
if (size == 32)
@@ -329,11 +329,11 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) {
// ============================================================================
struct ConstPoolFill {
ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) :
ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
_dst(dst),
_dataSize(dataSize) {}
ASMJIT_INLINE void visit(const ConstPool::Node* node) {
ASMJIT_INLINE void visit(const ConstPool::Node* node) noexcept {
if (!node->_shared)
::memcpy(_dst + node->_offset, node->getData(), _dataSize);
}
@@ -342,7 +342,7 @@ struct ConstPoolFill {
size_t _dataSize;
};
void ConstPool::fill(void* dst) const {
void ConstPool::fill(void* dst) const noexcept {
// Clears possible gaps, asmjit should never emit garbage to the output.
::memset(dst, 0, _size);

View File

@@ -65,7 +65,7 @@ struct ConstPool {
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE void* getData() const {
ASMJIT_INLINE void* getData() const noexcept {
return static_cast<void*>(const_cast<ConstPool::Node*>(this) + 1);
}
@@ -100,7 +100,7 @@ struct ConstPool {
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE Tree(size_t dataSize = 0)
ASMJIT_INLINE Tree(size_t dataSize = 0) noexcept
: _root(nullptr),
_length(0),
_dataSize(dataSize) {}
@@ -110,7 +110,7 @@ struct ConstPool {
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() {
ASMJIT_INLINE void reset() noexcept {
_root = nullptr;
_length = 0;
}
@@ -119,10 +119,10 @@ struct ConstPool {
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool isEmpty() const { return _length == 0; }
ASMJIT_INLINE size_t getLength() const { return _length; }
ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; }
ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
ASMJIT_INLINE void setDataSize(size_t dataSize) {
ASMJIT_INLINE void setDataSize(size_t dataSize) noexcept {
ASMJIT_ASSERT(isEmpty());
_dataSize = dataSize;
}
@@ -131,15 +131,15 @@ struct ConstPool {
// [Ops]
// --------------------------------------------------------------------------
ASMJIT_API Node* get(const void* data);
ASMJIT_API void put(Node* node);
ASMJIT_API Node* get(const void* data) noexcept;
ASMJIT_API void put(Node* node) noexcept;
// --------------------------------------------------------------------------
// [Iterate]
// --------------------------------------------------------------------------
template<typename Visitor>
ASMJIT_INLINE void iterate(Visitor& visitor) const {
ASMJIT_INLINE void iterate(Visitor& visitor) const noexcept {
Node* node = const_cast<Node*>(_root);
Node* link;
@@ -182,7 +182,7 @@ struct ConstPool {
// [Helpers]
// --------------------------------------------------------------------------
static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) {
static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(sizeof(Node) + size);
if (node == nullptr)
return nullptr;
@@ -213,25 +213,25 @@ struct ConstPool {
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API ConstPool(Zone* zone);
ASMJIT_API ~ConstPool();
ASMJIT_API ConstPool(Zone* zone) noexcept;
ASMJIT_API ~ConstPool() noexcept;
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_API void reset();
ASMJIT_API void reset() noexcept;
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
//! Get whether the constant-pool is empty.
ASMJIT_INLINE bool isEmpty() const { return _size == 0; }
ASMJIT_INLINE bool isEmpty() const noexcept { return _size == 0; }
//! Get the size of the constant-pool in bytes.
ASMJIT_INLINE size_t getSize() const { return _size; }
ASMJIT_INLINE size_t getSize() const noexcept { return _size; }
//! Get minimum alignment.
ASMJIT_INLINE size_t getAlignment() const { return _alignment; }
ASMJIT_INLINE size_t getAlignment() const noexcept { return _alignment; }
//! Add a constant to the constant pool.
//!
@@ -250,14 +250,14 @@ struct ConstPool {
//! been already added. For example if you try to add 4-byte constant and then
//! 8-byte constant having the same 4-byte pattern as the previous one, two
//! independent slots will be generated by the pool.
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset);
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
// --------------------------------------------------------------------------
// [Fill]
// --------------------------------------------------------------------------
//! Fill the destination with the constants from the pool.
ASMJIT_API void fill(void* dst) const;
ASMJIT_API void fill(void* dst) const noexcept;
// --------------------------------------------------------------------------
// [Members]

View File

@@ -17,113 +17,19 @@
namespace asmjit {
// ============================================================================
// [asmjit::PodVectorBase - NullData]
// [asmjit::StringBuilder - Construction / Destruction]
// ============================================================================
const PodVectorData PodVectorBase::_nullData = { 0, 0 };
// ============================================================================
// [asmjit::PodVectorBase - Reset]
// ============================================================================
//! Clear vector data and free internal buffer.
void PodVectorBase::reset(bool releaseMemory) {
PodVectorData* d = _d;
if (d == &_nullData)
return;
if (releaseMemory) {
ASMJIT_FREE(d);
_d = const_cast<PodVectorData*>(&_nullData);
return;
}
d->length = 0;
}
// ============================================================================
// [asmjit::PodVectorBase - Helpers]
// ============================================================================
Error PodVectorBase::_grow(size_t n, size_t sizeOfT) {
PodVectorData* d = _d;
size_t threshold = kMemAllocGrowMax / sizeOfT;
size_t capacity = d->capacity;
size_t after = d->length;
if (IntTraits<size_t>::maxValue() - n < after)
return kErrorNoHeapMemory;
after += n;
if (capacity >= after)
return kErrorOk;
// PodVector is used as a linear array for some data structures used by
// AsmJit code generation. The purpose of this agressive growing schema
// is to minimize memory reallocations, because AsmJit code generation
// classes live short life and will be freed or reused soon.
if (capacity < 32)
capacity = 32;
else if (capacity < 128)
capacity = 128;
else if (capacity < 512)
capacity = 512;
while (capacity < after) {
if (capacity < threshold)
capacity *= 2;
else
capacity += threshold;
}
return _reserve(capacity, sizeOfT);
}
Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) {
PodVectorData* d = _d;
if (d->capacity >= n)
return kErrorOk;
size_t nBytes = sizeof(PodVectorData) + n * sizeOfT;
if (nBytes < n)
return kErrorNoHeapMemory;
if (d == &_nullData) {
d = static_cast<PodVectorData*>(ASMJIT_ALLOC(nBytes));
if (d == nullptr)
return kErrorNoHeapMemory;
d->length = 0;
}
else {
d = static_cast<PodVectorData*>(ASMJIT_REALLOC(d, nBytes));
if (d == nullptr)
return kErrorNoHeapMemory;
}
d->capacity = n;
_d = d;
return kErrorOk;
}
// Should be placed in read-only memory.
static const char StringBuilder_empty[4] = { 0 };
// ============================================================================
// [asmjit::StringBuilder - Construction / Destruction]
// ============================================================================
StringBuilder::StringBuilder()
StringBuilder::StringBuilder() noexcept
: _data(const_cast<char*>(StringBuilder_empty)),
_length(0),
_capacity(0),
_canFree(false) {}
StringBuilder::~StringBuilder() {
StringBuilder::~StringBuilder() noexcept {
if (_canFree)
ASMJIT_FREE(_data);
}
@@ -132,7 +38,7 @@ StringBuilder::~StringBuilder() {
// [asmjit::StringBuilder - Prepare / Reserve]
// ============================================================================
char* StringBuilder::prepare(uint32_t op, size_t len) {
char* StringBuilder::prepare(uint32_t op, size_t len) noexcept {
// --------------------------------------------------------------------------
// [Set]
// --------------------------------------------------------------------------
@@ -231,7 +137,7 @@ char* StringBuilder::prepare(uint32_t op, size_t len) {
}
}
bool StringBuilder::reserve(size_t to) {
bool StringBuilder::reserve(size_t to) noexcept {
if (_capacity >= to)
return true;
@@ -258,7 +164,7 @@ bool StringBuilder::reserve(size_t to) {
// [asmjit::StringBuilder - Clear]
// ============================================================================
void StringBuilder::clear() {
void StringBuilder::clear() noexcept {
if (_data != StringBuilder_empty)
_data[0] = 0;
_length = 0;
@@ -268,7 +174,7 @@ void StringBuilder::clear() {
// [asmjit::StringBuilder - Methods]
// ============================================================================
bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) {
bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) noexcept {
if (len == kInvalidIndex)
len = str != nullptr ? ::strlen(str) : static_cast<size_t>(0);
@@ -280,7 +186,7 @@ bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) {
return true;
}
bool StringBuilder::_opChar(uint32_t op, char c) {
bool StringBuilder::_opChar(uint32_t op, char c) noexcept {
char* p = prepare(op, 1);
if (p == nullptr)
return false;
@@ -289,7 +195,7 @@ bool StringBuilder::_opChar(uint32_t op, char c) {
return true;
}
bool StringBuilder::_opChars(uint32_t op, char c, size_t len) {
bool StringBuilder::_opChars(uint32_t op, char c, size_t len) noexcept {
char* p = prepare(op, len);
if (p == nullptr)
return false;
@@ -300,7 +206,7 @@ bool StringBuilder::_opChars(uint32_t op, char c, size_t len) {
static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) {
bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
if (base < 2 || base > 36)
base = 10;
@@ -389,7 +295,7 @@ bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t wid
return true;
}
bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) {
bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) noexcept {
if (len >= IntTraits<size_t>::maxValue() / 2)
return false;
@@ -407,7 +313,7 @@ bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) {
return true;
}
bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) {
bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
char buf[1024];
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
@@ -416,7 +322,7 @@ bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) {
return _opString(op, buf);
}
bool StringBuilder::setFormat(const char* fmt, ...) {
bool StringBuilder::setFormat(const char* fmt, ...) noexcept {
bool result;
va_list ap;
@@ -427,7 +333,7 @@ bool StringBuilder::setFormat(const char* fmt, ...) {
return result;
}
bool StringBuilder::appendFormat(const char* fmt, ...) {
bool StringBuilder::appendFormat(const char* fmt, ...) noexcept {
bool result;
va_list ap;
@@ -438,7 +344,7 @@ bool StringBuilder::appendFormat(const char* fmt, ...) {
return result;
}
bool StringBuilder::eq(const char* str, size_t len) const {
bool StringBuilder::eq(const char* str, size_t len) const noexcept {
const char* aData = _data;
const char* bData = str;

View File

@@ -40,15 +40,15 @@ struct BitArray {
// [Accessors]
// --------------------------------------------------------------------------
ASMJIT_INLINE uintptr_t getBit(uint32_t index) const {
ASMJIT_INLINE uintptr_t getBit(uint32_t index) const noexcept {
return (data[index / kEntityBits] >> (index % kEntityBits)) & 1;
}
ASMJIT_INLINE void setBit(uint32_t index) {
ASMJIT_INLINE void setBit(uint32_t index) noexcept {
data[index / kEntityBits] |= static_cast<uintptr_t>(1) << (index % kEntityBits);
}
ASMJIT_INLINE void delBit(uint32_t index) {
ASMJIT_INLINE void delBit(uint32_t index) noexcept {
data[index / kEntityBits] &= ~(static_cast<uintptr_t>(1) << (index % kEntityBits));
}
@@ -57,7 +57,7 @@ struct BitArray {
// --------------------------------------------------------------------------
//! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`.
ASMJIT_INLINE bool copyBits(const BitArray* s0, uint32_t len) {
ASMJIT_INLINE bool copyBits(const BitArray* s0, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i];
@@ -67,11 +67,11 @@ struct BitArray {
return r != 0;
}
ASMJIT_INLINE bool addBits(const BitArray* s0, uint32_t len) {
ASMJIT_INLINE bool addBits(const BitArray* s0, uint32_t len) noexcept {
return addBits(this, s0, len);
}
ASMJIT_INLINE bool addBits(const BitArray* s0, const BitArray* s1, uint32_t len) {
ASMJIT_INLINE bool addBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] | s1->data[i];
@@ -81,11 +81,11 @@ struct BitArray {
return r != 0;
}
ASMJIT_INLINE bool andBits(const BitArray* s1, uint32_t len) {
ASMJIT_INLINE bool andBits(const BitArray* s1, uint32_t len) noexcept {
return andBits(this, s1, len);
}
ASMJIT_INLINE bool andBits(const BitArray* s0, const BitArray* s1, uint32_t len) {
ASMJIT_INLINE bool andBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] & s1->data[i];
@@ -95,11 +95,11 @@ struct BitArray {
return r != 0;
}
ASMJIT_INLINE bool delBits(const BitArray* s1, uint32_t len) {
ASMJIT_INLINE bool delBits(const BitArray* s1, uint32_t len) noexcept {
return delBits(this, s1, len);
}
ASMJIT_INLINE bool delBits(const BitArray* s0, const BitArray* s1, uint32_t len) {
ASMJIT_INLINE bool delBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t t = s0->data[i] & ~s1->data[i];
@@ -109,11 +109,11 @@ struct BitArray {
return r != 0;
}
ASMJIT_INLINE bool _addBitsDelSource(BitArray* s1, uint32_t len) {
ASMJIT_INLINE bool _addBitsDelSource(BitArray* s1, uint32_t len) noexcept {
return _addBitsDelSource(this, s1, len);
}
ASMJIT_INLINE bool _addBitsDelSource(const BitArray* s0, BitArray* s1, uint32_t len) {
ASMJIT_INLINE bool _addBitsDelSource(const BitArray* s0, BitArray* s1, uint32_t len) noexcept {
uintptr_t r = 0;
for (uint32_t i = 0; i < len; i++) {
uintptr_t a = s0->data[i];
@@ -135,217 +135,6 @@ struct BitArray {
uintptr_t data[1];
};
// ============================================================================
// [asmjit::PodVectorData]
// ============================================================================
//! \internal
struct PodVectorData {
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get data.
ASMJIT_INLINE void* getData() const { return (void*)(this + 1); }
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Capacity of the vector.
size_t capacity;
//! Length of the vector.
size_t length;
};
// ============================================================================
// [asmjit::PodVectorBase]
// ============================================================================
//! \internal
struct PodVectorBase {
static ASMJIT_API const PodVectorData _nullData;
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `PodVectorBase`.
ASMJIT_INLINE PodVectorBase() : _d(const_cast<PodVectorData*>(&_nullData)) {}
//! Destroy the `PodVectorBase` and data.
ASMJIT_INLINE ~PodVectorBase() { reset(true); }
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! Reset the vector data and set its `length` to zero.
//!
//! If `releaseMemory` is true the vector buffer will be released to the
//! system.
ASMJIT_API void reset(bool releaseMemory = false);
// --------------------------------------------------------------------------
// [Grow / Reserve]
// --------------------------------------------------------------------------
protected:
ASMJIT_API Error _grow(size_t n, size_t sizeOfT);
ASMJIT_API Error _reserve(size_t n, size_t sizeOfT);
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
public:
PodVectorData* _d;
};
// ============================================================================
// [asmjit::PodVector<T>]
// ============================================================================
//! Template used to store and manage array of POD data.
//!
//! This template has these adventages over other vector<> templates:
//! - Non-copyable (designed to be non-copyable, we want it)
//! - No copy-on-write (some implementations of stl can use it)
//! - Optimized for working only with POD types
//! - Uses ASMJIT_... memory management macros
template <typename T>
struct PodVector : PodVectorBase {
ASMJIT_NO_COPY(PodVector<T>)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `PodVector<T>`.
ASMJIT_INLINE PodVector() {}
//! Destroy the `PodVector<>` and data.
ASMJIT_INLINE ~PodVector() {}
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
//! Get whether the vector is empty.
ASMJIT_INLINE bool isEmpty() const { return _d->length == 0; }
//! Get length.
ASMJIT_INLINE size_t getLength() const { return _d->length; }
//! Get capacity.
ASMJIT_INLINE size_t getCapacity() const { return _d->capacity; }
//! Get data.
ASMJIT_INLINE T* getData() { return static_cast<T*>(_d->getData()); }
//! \overload
ASMJIT_INLINE const T* getData() const { return static_cast<const T*>(_d->getData()); }
// --------------------------------------------------------------------------
// [Grow / Reserve]
// --------------------------------------------------------------------------
//! Called to grow the buffer to fit at least `n` elements more.
ASMJIT_INLINE Error _grow(size_t n) { return PodVectorBase::_grow(n, sizeof(T)); }
//! Realloc internal array to fit at least `n` items.
ASMJIT_INLINE Error _reserve(size_t n) { return PodVectorBase::_reserve(n, sizeof(T)); }
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
//! Prepend `item` to vector.
Error prepend(const T& item) {
PodVectorData* d = _d;
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
_d = d;
}
::memmove(static_cast<T*>(d->getData()) + 1, d->getData(), d->length * sizeof(T));
::memcpy(d->getData(), &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Insert an `item` at the `index`.
Error insert(size_t index, const T& item) {
PodVectorData* d = _d;
ASMJIT_ASSERT(index <= d->length);
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
T* dst = static_cast<T*>(d->getData()) + index;
::memmove(dst + 1, dst, d->length - index);
::memcpy(dst, &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Append `item` to vector.
Error append(const T& item) {
PodVectorData* d = _d;
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
::memcpy(static_cast<T*>(d->getData()) + d->length, &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Get index of `val` or `kInvalidIndex` if not found.
size_t indexOf(const T& val) const {
PodVectorData* d = _d;
const T* data = static_cast<const T*>(d->getData());
size_t len = d->length;
for (size_t i = 0; i < len; i++)
if (data[i] == val)
return i;
return kInvalidIndex;
}
//! Remove item at index `i`.
void removeAt(size_t i) {
PodVectorData* d = _d;
ASMJIT_ASSERT(i < d->length);
T* data = static_cast<T*>(d->getData()) + i;
d->length--;
::memmove(data, data + 1, d->length - i);
}
//! Swap this pod-vector with `other`.
void swap(PodVector<T>& other) {
T* otherData = other._d;
other._d = _d;
_d = otherData;
}
//! Get item at index `i`.
ASMJIT_INLINE T& operator[](size_t i) {
ASMJIT_ASSERT(i < getLength());
return getData()[i];
}
//! Get item at index `i`.
ASMJIT_INLINE const T& operator[](size_t i) const {
ASMJIT_ASSERT(i < getLength());
return getData()[i];
}
};
// ============================================================================
// [asmjit::PodList<T>]
// ============================================================================
@@ -365,12 +154,12 @@ struct PodList {
// --------------------------------------------------------------------------
//! Get next node.
ASMJIT_INLINE Link* getNext() const { return _next; }
ASMJIT_INLINE Link* getNext() const noexcept { return _next; }
//! Get value.
ASMJIT_INLINE T getValue() const { return _value; }
ASMJIT_INLINE T getValue() const noexcept { return _value; }
//! Set value to `value`.
ASMJIT_INLINE void setValue(const T& value) { _value = value; }
ASMJIT_INLINE void setValue(const T& value) noexcept { _value = value; }
// --------------------------------------------------------------------------
// [Members]
@@ -384,35 +173,35 @@ struct PodList {
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE PodList() : _first(nullptr), _last(nullptr) {}
ASMJIT_INLINE ~PodList() {}
ASMJIT_INLINE PodList() noexcept : _first(nullptr), _last(nullptr) {}
ASMJIT_INLINE ~PodList() noexcept {}
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool isEmpty() const { return _first != nullptr; }
ASMJIT_INLINE bool isEmpty() const noexcept { return _first != nullptr; }
ASMJIT_INLINE Link* getFirst() const { return _first; }
ASMJIT_INLINE Link* getLast() const { return _last; }
ASMJIT_INLINE Link* getFirst() const noexcept { return _first; }
ASMJIT_INLINE Link* getLast() const noexcept { return _last; }
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() {
ASMJIT_INLINE void reset() noexcept {
_first = nullptr;
_last = nullptr;
}
ASMJIT_INLINE void prepend(Link* link) {
ASMJIT_INLINE void prepend(Link* link) noexcept {
link->_next = _first;
if (_first == nullptr)
_last = link;
_first = link;
}
ASMJIT_INLINE void append(Link* link) {
ASMJIT_INLINE void append(Link* link) noexcept {
link->_next = nullptr;
if (_first == nullptr)
_first = link;
@@ -472,92 +261,92 @@ struct StringBuilder {
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_API StringBuilder();
ASMJIT_API ~StringBuilder();
ASMJIT_API StringBuilder() noexcept;
ASMJIT_API ~StringBuilder() noexcept;
ASMJIT_INLINE StringBuilder(const _NoInit&) {}
ASMJIT_INLINE StringBuilder(const _NoInit&) noexcept {}
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get string builder capacity.
ASMJIT_INLINE size_t getCapacity() const { return _capacity; }
ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; }
//! Get length.
ASMJIT_INLINE size_t getLength() const { return _length; }
ASMJIT_INLINE size_t getLength() const noexcept { return _length; }
//! Get null-terminated string data.
ASMJIT_INLINE char* getData() { return _data; }
ASMJIT_INLINE char* getData() noexcept { return _data; }
//! Get null-terminated string data (const).
ASMJIT_INLINE const char* getData() const { return _data; }
ASMJIT_INLINE const char* getData() const noexcept { return _data; }
// --------------------------------------------------------------------------
// [Prepare / Reserve]
// --------------------------------------------------------------------------
//! Prepare to set/append.
ASMJIT_API char* prepare(uint32_t op, size_t len);
ASMJIT_API char* prepare(uint32_t op, size_t len) noexcept;
//! Reserve `to` bytes in string builder.
ASMJIT_API bool reserve(size_t to);
ASMJIT_API bool reserve(size_t to) noexcept;
// --------------------------------------------------------------------------
// [Clear]
// --------------------------------------------------------------------------
//! Clear the content in String builder.
ASMJIT_API void clear();
ASMJIT_API void clear() noexcept;
// --------------------------------------------------------------------------
// [Op]
// --------------------------------------------------------------------------
ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex);
ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap);
ASMJIT_API bool _opChar(uint32_t op, char c);
ASMJIT_API bool _opChars(uint32_t op, char c, size_t len);
ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0);
ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len);
ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex) noexcept;
ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
ASMJIT_API bool _opChar(uint32_t op, char c) noexcept;
ASMJIT_API bool _opChars(uint32_t op, char c, size_t len) noexcept;
ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len) noexcept;
// --------------------------------------------------------------------------
// [Set]
// --------------------------------------------------------------------------
//! Replace the current content by `str` of `len`.
ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) {
ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) noexcept {
return _opString(kStringOpSet, str, len);
}
//! Replace the current content by formatted string `fmt`.
ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) {
ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kStringOpSet, fmt, ap);
}
//! Replace the current content by formatted string `fmt`.
ASMJIT_API bool setFormat(const char* fmt, ...);
ASMJIT_API bool setFormat(const char* fmt, ...) noexcept;
//! Replace the current content by `c` character.
ASMJIT_INLINE bool setChar(char c) {
ASMJIT_INLINE bool setChar(char c) noexcept {
return _opChar(kStringOpSet, c);
}
//! Replace the current content by `c` of `len`.
ASMJIT_INLINE bool setChars(char c, size_t len) {
ASMJIT_INLINE bool setChars(char c, size_t len) noexcept {
return _opChars(kStringOpSet, c, len);
}
//! Replace the current content by formatted integer `i`.
ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) {
ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned);
}
//! Replace the current content by formatted integer `i`.
ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) {
ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpSet, i, base, width, flags);
}
//! Replace the current content by the given `data` converted to a HEX string.
ASMJIT_INLINE bool setHex(const void* data, size_t len) {
ASMJIT_INLINE bool setHex(const void* data, size_t len) noexcept {
return _opHex(kStringOpSet, data, len);
}
@@ -566,40 +355,40 @@ struct StringBuilder {
// --------------------------------------------------------------------------
//! Append `str` of `len`.
ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) {
ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) noexcept {
return _opString(kStringOpAppend, str, len);
}
//! Append a formatted string `fmt` to the current content.
ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) {
ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kStringOpAppend, fmt, ap);
}
//! Append a formatted string `fmt` to the current content.
ASMJIT_API bool appendFormat(const char* fmt, ...);
ASMJIT_API bool appendFormat(const char* fmt, ...) noexcept;
//! Append `c` character.
ASMJIT_INLINE bool appendChar(char c) {
ASMJIT_INLINE bool appendChar(char c) noexcept {
return _opChar(kStringOpAppend, c);
}
//! Append `c` of `len`.
ASMJIT_INLINE bool appendChars(char c, size_t len) {
ASMJIT_INLINE bool appendChars(char c, size_t len) noexcept {
return _opChars(kStringOpAppend, c, len);
}
//! Append `i`.
ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) {
ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpAppend, static_cast<uint64_t>(i), base, width, flags | kStringFormatSigned);
}
//! Append `i`.
ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) {
ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kStringOpAppend, i, base, width, flags);
}
//! Append the given `data` converted to a HEX string.
ASMJIT_INLINE bool appendHex(const void* data, size_t len) {
ASMJIT_INLINE bool appendHex(const void* data, size_t len) noexcept {
return _opHex(kStringOpAppend, data, len);
}
@@ -608,7 +397,7 @@ struct StringBuilder {
// --------------------------------------------------------------------------
//! Append `str` of `len`, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) {
ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) noexcept {
// len should be a constant if we are inlining.
if (len == kInvalidIndex) {
char* p = &_data[_length];
@@ -636,7 +425,7 @@ struct StringBuilder {
}
//! Append `c` character, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendChar(char c) {
ASMJIT_INLINE void _appendChar(char c) noexcept {
ASMJIT_ASSERT(_capacity - _length >= 1);
_data[_length] = c;
@@ -645,7 +434,7 @@ struct StringBuilder {
}
//! Append `c` of `len`, inlined, without buffer overflow check.
ASMJIT_INLINE void _appendChars(char c, size_t len) {
ASMJIT_INLINE void _appendChars(char c, size_t len) noexcept {
ASMJIT_ASSERT(_capacity - _length >= len);
char* p = &_data[_length];
@@ -658,7 +447,7 @@ struct StringBuilder {
_length += len;
}
ASMJIT_INLINE void _appendUInt32(uint32_t i) {
ASMJIT_INLINE void _appendUInt32(uint32_t i) noexcept {
char buf_[32];
char* pEnd = buf_ + ASMJIT_ARRAY_SIZE(buf_);
@@ -688,19 +477,19 @@ struct StringBuilder {
// --------------------------------------------------------------------------
//! Check for equality with other `str` of `len`.
ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const;
ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const noexcept;
//! Check for equality with `other`.
ASMJIT_INLINE bool eq(const StringBuilder& other) const { return eq(other._data); }
ASMJIT_INLINE bool eq(const StringBuilder& other) const noexcept { return eq(other._data); }
// --------------------------------------------------------------------------
// [Operator Overload]
// --------------------------------------------------------------------------
ASMJIT_INLINE bool operator==(const StringBuilder& other) const { return eq(other); }
ASMJIT_INLINE bool operator!=(const StringBuilder& other) const { return !eq(other); }
ASMJIT_INLINE bool operator==(const StringBuilder& other) const noexcept { return eq(other); }
ASMJIT_INLINE bool operator!=(const StringBuilder& other) const noexcept { return !eq(other); }
ASMJIT_INLINE bool operator==(const char* str) const { return eq(str); }
ASMJIT_INLINE bool operator!=(const char* str) const { return !eq(str); }
ASMJIT_INLINE bool operator==(const char* str) const noexcept { return eq(str); }
ASMJIT_INLINE bool operator!=(const char* str) const noexcept { return !eq(str); }
// --------------------------------------------------------------------------
// [Members]
@@ -729,7 +518,7 @@ struct StringBuilderTmp : public StringBuilder {
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE StringBuilderTmp() : StringBuilder(NoInit) {
ASMJIT_INLINE StringBuilderTmp() noexcept : StringBuilder(NoInit) {
_data = _embeddedData;
_data[0] = 0;

View File

@@ -9,14 +9,8 @@
// [Dependencies - AsmJit]
#include "../base/cpuinfo.h"
#include "../base/utils.h"
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
#include "../x86/x86cpuinfo.h"
#else
// ?
#endif
// [Dependencies - Posix]
#if ASMJIT_OS_POSIX
# include <errno.h>
# include <sys/statvfs.h>
@@ -24,53 +18,618 @@
# include <unistd.h>
#endif // ASMJIT_OS_POSIX
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
# if ASMJIT_CC_MSC_GE(14, 0, 0)
# include <intrin.h> // Required by `__cpuid()` and `_xgetbv()`.
# endif // _MSC_VER >= 1400
#endif
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
# if ASMJIT_OS_LINUX
# include <sys/auxv.h> // Required by `getauxval()`.
# endif
#endif
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::CpuInfo - DetectHwThreadsCount]
// [asmjit::CpuInfo - Detect ARM & ARM64]
// ============================================================================
uint32_t CpuInfo::detectHwThreadsCount() {
// ARM information has to be retrieved by the OS (this is how ARM was designed).
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
#if ASMJIT_ARCH_ARM64
static void armPopulateBaseline64Features(CpuInfo* cpuInfo) noexcept {
  // Every AArch64 core implements ARMv8, which subsumes the ARMv6/ARMv7
  // feature levels, DSP extensions, hardware divide and VFPv2-v4. Thumb
  // (all variations) is deliberately absent - it exists only on ARM32.
  static const uint32_t baseline[] = {
    CpuInfo::kArmFeatureV6,
    CpuInfo::kArmFeatureV7,
    CpuInfo::kArmFeatureV8,
    CpuInfo::kArmFeatureDSP,
    CpuInfo::kArmFeatureIDIV,
    CpuInfo::kArmFeatureVFP2,
    CpuInfo::kArmFeatureVFP3,
    CpuInfo::kArmFeatureVFP4
  };

  for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(baseline); i++)
    cpuInfo->addFeature(baseline[i]);
}
#endif // ASMJIT_ARCH_ARM64
#if ASMJIT_OS_WINDOWS
//! \internal
//!
//! Detect ARM CPU features on Windows.
//!
//! The detection is based on `IsProcessorFeaturePresent()` API call.
static void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
  cpuInfo->setArch(kArchArm32);

  // Windows for ARM requires at least ARMv7 with DSP extensions.
  cpuInfo->addFeature(CpuInfo::kArmFeatureV6);
  cpuInfo->addFeature(CpuInfo::kArmFeatureV7);
  cpuInfo->addFeature(CpuInfo::kArmFeatureDSP);

  // Windows for ARM requires VFP3.
  cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);
  cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3);

  // Windows for ARM requires and uses THUMB2.
  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB);
  cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2);
#else
  cpuInfo->setArch(kArchArm64);
  armPopulateBaseline64Features(cpuInfo);
#endif

  // Windows for ARM requires NEON.
  cpuInfo->addFeature(CpuInfo::kArmFeatureNEON);

  // Detect additional CPU features by calling `IsProcessorFeaturePresent()`.
  // Each entry maps a Windows processor-feature ID to an AsmJit feature ID.
  struct WinPFPMapping {
    uint32_t pfpId, featureId;
  };

  static const WinPFPMapping mapping[] = {
    { PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFP4 },
    { PF_ARM_VFP_32_REGISTERS_AVAILABLE , CpuInfo::kArmFeatureVFP_D32 },
    { PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIV },
    { PF_ARM_64BIT_LOADSTORE_ATOMIC , CpuInfo::kArmFeatureAtomics64 }
  };

  for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(mapping); i++)
    if (::IsProcessorFeaturePresent(mapping[i].pfpId))
      cpuInfo->addFeature(mapping[i].featureId);
}
#endif // ASMJIT_OS_WINDOWS
#if ASMJIT_OS_LINUX
struct LinuxHWCapMapping {
uint32_t hwcapMask, featureId;
};
static void armDetectHWCaps(CpuInfo* cpuInfo,
  unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept {

  // Query the kernel-reported hardware capabilities for this process and
  // translate every fully-matched mask into the corresponding feature ID.
  unsigned long caps = getauxval(type);

  for (size_t i = 0; i < length; i++) {
    unsigned long m = mapping[i].hwcapMask;
    if ((caps & m) == m)
      cpuInfo->addFeature(mapping[i].featureId);
  }
}
//! \internal
//!
//! Detect ARM CPU features on Linux.
//!
//! The detection is based on `getauxval()`.
static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_ARCH_ARM32
  cpuInfo->setArch(kArchArm32);

  // `AT_HWCAP` provides ARMv7 (and less) related flags.
  static const LinuxHWCapMapping hwCapMapping[] = {
    { /* HWCAP_VFPv3  */ (1 << 13), CpuInfo::kArmFeatureVFP3    },
    { /* HWCAP_VFPv4  */ (1 << 16), CpuInfo::kArmFeatureVFP4    },
    // Mask (3 << 17) covers both HWCAP_IDIVA and HWCAP_IDIVT, so IDIV is
    // reported only when divide is available in both ARM and Thumb state.
    { /* HWCAP_IDIVA|HWCAP_IDIVT */ (3 << 17), CpuInfo::kArmFeatureIDIV },
    { /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 },
    { /* HWCAP_NEON   */ (1 << 12), CpuInfo::kArmFeatureNEON    },
    { /* HWCAP_EDSP   */ (1 <<  7), CpuInfo::kArmFeatureDSP     }
  };
  armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));

  // VFP3 implies VFP2.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3))
    cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2);

  // VFP2 implies ARMv6.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP2))
    cpuInfo->addFeature(CpuInfo::kArmFeatureV6);

  // VFP3 or NEON implies ARMv7.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureNEON))
    cpuInfo->addFeature(CpuInfo::kArmFeatureV7);

  // `AT_HWCAP2` provides ARMv8 related flags.
  static const LinuxHWCapMapping hwCap2Mapping[] = {
    { /* HWCAP2_AES   */ (1 << 0), CpuInfo::kArmFeatureAES    },
    { /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32  },
    { /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL  },
    { /* HWCAP2_SHA1  */ (1 << 2), CpuInfo::kArmFeatureSHA1   },
    { /* HWCAP2_SHA2  */ (1 << 3), CpuInfo::kArmFeatureSHA256 }
  };
  // BUG FIX: the size expression referenced an undeclared `hwCapMapping2`;
  // the table above is named `hwCap2Mapping` (would not compile on ARM32).
  armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCap2Mapping));

  // Any ARMv8-only feature implies an ARMv8-capable core.
  if (cpuInfo->hasFeature(CpuInfo::kArmFeatureAES   ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureCRC32 ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeaturePMULL ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA1  ) ||
      cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA256)) {
    cpuInfo->addFeature(CpuInfo::kArmFeatureV8);
  }
#else
  cpuInfo->setArch(kArchArm64);
  armPopulateBaseline64Features(cpuInfo);

  // `AT_HWCAP` provides ARMv8 related flags.
  static const LinuxHWCapMapping hwCapMapping[] = {
    { /* HWCAP_ASIMD   */ (1 << 1), CpuInfo::kArmFeatureNEON      },
    { /* HWCAP_AES     */ (1 << 3), CpuInfo::kArmFeatureAES       },
    { /* HWCAP_CRC32   */ (1 << 7), CpuInfo::kArmFeatureCRC32     },
    { /* HWCAP_PMULL   */ (1 << 4), CpuInfo::kArmFeaturePMULL     },
    { /* HWCAP_SHA1    */ (1 << 5), CpuInfo::kArmFeatureSHA1      },
    // BUG FIX: a comma was missing after the SHA2 initializer, which made
    // the ATOMICS entry a syntax error (would not compile on ARM64).
    { /* HWCAP_SHA2    */ (1 << 6), CpuInfo::kArmFeatureSHA256    },
    { /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 }
  };
  armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping));

  // `AT_HWCAP2` is not used at the moment.
#endif
}
#endif // ASMJIT_OS_LINUX
//! \internal
//!
//! Detect ARM CPU information - dispatches to the OS-specific detector
//! defined above; unsupported operating systems fail at compile-time.
static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept {
#if ASMJIT_OS_WINDOWS
  armDetectCpuInfoOnWindows(cpuInfo);
#elif ASMJIT_OS_LINUX
  armDetectCpuInfoOnLinux(cpuInfo);
#else
# error "[asmjit] armDetectCpuInfo() - Unsupported OS."
#endif
}
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
// ============================================================================
// [asmjit::CpuInfo - Detect X86 & X64]
// ============================================================================
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
//! \internal
//!
//! X86 CPUID result.
struct CpuIdResult {
uint32_t eax, ebx, ecx, edx;
};
//! \internal
//!
//! Content of XCR register, result of XGETBV instruction.
struct XGetBVResult {
uint32_t eax, edx;
};
#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(15, 0, 30729) && ASMJIT_ARCH_X64
//! \internal
//!
//! HACK: VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However,
//! 64-bit calling convention specifies the first parameter to be passed in
//! ECX, so we may be lucky if compiler doesn't move the register, otherwise
//! the result would be wrong.
static void ASMJIT_NOINLINE void x86CallCpuIdWorkaround(uint32_t inEcx, uint32_t inEax, CpuIdResult* result) noexcept {
__cpuid(reinterpret_cast<int*>(result), inEax);
}
#endif
//! \internal
//!
//! Wrapper to call `cpuid` instruction.
static void ASMJIT_INLINE x86CallCpuId(CpuIdResult* result, uint32_t inEax, uint32_t inEcx = 0) noexcept {
#if ASMJIT_CC_MSC && ASMJIT_CC_MSC_GE(15, 0, 30729)
  // MSVC 2008 SP1+ provides `__cpuidex`, which honors the ECX input
  // (required by sub-leaf queries such as EAX=0x7 ECX=0x0).
  __cpuidex(reinterpret_cast<int*>(result), inEax, inEcx);
#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X64
  // Old 64-bit MSVC - see the workaround note above x86CallCpuIdWorkaround().
  x86CallCpuIdWorkaround(inEcx, inEax, result);
#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X86
  // 32-bit MSVC inline assembly; all four outputs are stored through EDI.
  uint32_t paramEax = inEax;
  uint32_t paramEcx = inEcx;
  uint32_t* out = reinterpret_cast<uint32_t*>(result);

  __asm {
    mov eax, paramEax
    mov ecx, paramEcx
    mov edi, out
    cpuid
    mov dword ptr[edi + 0], eax
    mov dword ptr[edi + 4], ebx
    mov dword ptr[edi + 8], ecx
    mov dword ptr[edi + 12], edx
  }
#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X86
  // EBX is saved to EDI and restored around CPUID instead of appearing in
  // the constraint lists - presumably because EBX can be reserved (e.g. as
  // the PIC register) on 32-bit targets; NOTE(review): confirm.
  __asm__ __volatile__(
    "mov %%ebx, %%edi\n"
    "cpuid\n"
    "xchg %%edi, %%ebx\n"
      : "=a"(result->eax),
        "=D"(result->ebx),
        "=c"(result->ecx),
        "=d"(result->edx)
      : "a"(inEax),
        "c"(inEcx)
  );
#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X64
  // Same RBX save/restore dance as the 32-bit path above.
  __asm__ __volatile__( \
    "mov %%rbx, %%rdi\n"
    "cpuid\n"
    "xchg %%rdi, %%rbx\n"
      : "=a"(result->eax),
        "=D"(result->ebx),
        "=c"(result->ecx),
        "=d"(result->edx)
      : "a"(inEax),
        "c"(inEcx)
  );
#else
# error "[asmjit] x86CallCpuid() - Unsupported compiler."
#endif
}
//! \internal
//!
//! Wrapper to call `xgetbv` instruction.
static void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept {
#if ASMJIT_CC_MSC_GE(16, 0, 40219) // 2010SP1+
  // MSVC 2010 SP1+ has the `_xgetbv` intrinsic; split the 64-bit result
  // back into the EAX:EDX pair the callers expect.
  uint64_t value = _xgetbv(inEcx);

  result->eax = static_cast<uint32_t>(value & 0xFFFFFFFFU);
  result->edx = static_cast<uint32_t>(value >> 32);
#elif ASMJIT_CC_GCC || ASMJIT_CC_CLANG
  uint32_t outEax;
  uint32_t outEdx;

  // Replaced, because the world is not perfect (older assemblers may not
  // know the `xgetbv` mnemonic, so the raw opcode bytes are emitted):
  //   __asm__ __volatile__("xgetbv" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));
  __asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx));

  result->eax = outEax;
  result->edx = outEdx;
#else
  // Unknown compiler - report an all-zero XCR0; callers then see no
  // OS-enabled extended state.
  result->eax = 0;
  result->edx = 0;
#endif
}
//! \internal
//!
//! Map a 12-byte vendor string returned by `cpuid` into a `CpuInfo::Vendor` ID.
static uint32_t x86GetCpuVendorID(const char* vendorString) noexcept {
  struct VendorData {
    uint32_t id;
    char text[12];
  };

  static const VendorData vendorList[] = {
    { CpuInfo::kVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } },
    { CpuInfo::kVendorAMD   , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } },
    { CpuInfo::kVendorVIA   , { 'V', 'I', 'A', 0  , 'V', 'I', 'A', 0  , 'V', 'I', 'A', 0  } },
    { CpuInfo::kVendorVIA   , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } }
  };

  // Compare the 12 bytes directly instead of punning both strings through
  // `reinterpret_cast<const uint32_t*>` as before - that was undefined
  // behavior (strict aliasing + possible misalignment) and would fault on
  // strict-alignment targets such as ARM. Byte-wise equality over all 12
  // characters is exactly equivalent to the three-DWORD comparison.
  for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(vendorList); i++) {
    const char* text = vendorList[i].text;

    uint32_t j = 0;
    while (j < 12 && vendorString[j] == text[j])
      j++;

    if (j == 12)
      return vendorList[i].id;
  }

  return CpuInfo::kVendorNone;
}
static ASMJIT_INLINE void x86SimplifyBrandString(char* s) noexcept {
  // Compacts the CPUID brand string in-place: collapses runs of spaces into
  // a single space and strips spaces adjacent to '@'. Every scanned byte is
  // cleared as it's consumed so no stale characters remain past the new
  // NUL terminator.
  char* dst = s;
  char last = 0;

  for (char c = s[0]; ; c = *++s) {
    s[0] = '\0';
    if (c == '\0')
      break;

    // Drop a space that follows '@' or precedes another space or '@'.
    bool drop = (c == ' ') && (last == '@' || s[1] == ' ' || s[1] == '@');
    if (!drop) {
      *dst++ = c;
      last = c;
    }
  }

  *dst = '\0';
}
//! \internal
//!
//! Detect X86/X64 CPU information via the CPUID instruction - fills in the
//! vendor/brand strings, family/model/stepping and the feature bitmask.
static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept {
  uint32_t i, maxId;

  CpuIdResult regs;
  XGetBVResult xcr0 = { 0, 0 };

  // Architecture is known at compile-time.
  cpuInfo->setArch(ASMJIT_ARCH_X86 ? kArchX86 : kArchX64);

  // --------------------------------------------------------------------------
  // [CPUID EAX=0x0]
  // --------------------------------------------------------------------------

  // Get vendor string/id. EBX/EDX/ECX hold the 12-byte vendor string in
  // that (non-alphabetical) order.
  x86CallCpuId(&regs, 0x0);

  maxId = regs.eax;

  ::memcpy(cpuInfo->_vendorString + 0, &regs.ebx, 4);
  ::memcpy(cpuInfo->_vendorString + 4, &regs.edx, 4);
  ::memcpy(cpuInfo->_vendorString + 8, &regs.ecx, 4);
  cpuInfo->_vendorId = x86GetCpuVendorID(cpuInfo->_vendorString);

  // --------------------------------------------------------------------------
  // [CPUID EAX=0x1]
  // --------------------------------------------------------------------------

  if (maxId >= 0x1) {
    // Get feature flags in ECX/EDX and family/model in EAX.
    x86CallCpuId(&regs, 0x1);

    // Fill family and model fields.
    cpuInfo->_family   = (regs.eax >> 8) & 0x0F;
    cpuInfo->_model    = (regs.eax >> 4) & 0x0F;
    cpuInfo->_stepping = (regs.eax     ) & 0x0F;

    // Use extended family and model fields (family 0x0F means "see the
    // extended fields" per the CPUID convention).
    if (cpuInfo->_family == 0x0F) {
      cpuInfo->_family += ((regs.eax >> 20) & 0xFF);
      cpuInfo->_model  += ((regs.eax >> 16) & 0x0F) << 4;
    }

    cpuInfo->_x86Data._processorType        = ((regs.eax >> 12) & 0x03);
    cpuInfo->_x86Data._brandIndex           = ((regs.ebx      ) & 0xFF);
    cpuInfo->_x86Data._flushCacheLineSize   = ((regs.ebx >>  8) & 0xFF) * 8;
    cpuInfo->_x86Data._maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF);

    // ECX feature bits of leaf 0x1.
    if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE3);
    if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCLMULQDQ);
    if (regs.ecx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureMONITOR);
    if (regs.ecx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSSE3);
    if (regs.ecx & 0x00002000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG16B);
    if (regs.ecx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_1);
    if (regs.ecx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_2);
    if (regs.ecx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVBE);
    if (regs.ecx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePOPCNT);
    if (regs.ecx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAESNI);
    if (regs.ecx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE);
    if (regs.ecx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE_OS);
    if (regs.ecx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDRAND);

    // EDX feature bits of leaf 0x1.
    if (regs.edx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSC);
    if (regs.edx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG8B);
    if (regs.edx & 0x00008000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMOV);
    if (regs.edx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH);
    if (regs.edx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX);
    if (regs.edx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR);
    if (regs.edx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
                                       .addFeature(CpuInfo::kX86FeatureMMX2);
    if (regs.edx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE)
                                       .addFeature(CpuInfo::kX86FeatureSSE2);
    if (regs.edx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMT);

    // AMD sets Multithreading to ON if it has two or more cores.
    if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == CpuInfo::kVendorAMD && (regs.edx & 0x10000000U)) {
      cpuInfo->_hwThreadsCount = 2;
    }

    // Get the content of XCR0 if supported by CPU and enabled by OS
    // (XSAVE and OSXSAVE bits both set).
    if ((regs.ecx & 0x0C000000U) == 0x0C000000U) {
      x86CallXGetBV(&xcr0, 0);
    }

    // Detect AVX+.
    if (regs.ecx & 0x10000000U) {
      // - XCR0[2:1] ==  11b
      //   XMM & YMM states need to be enabled by OS.
      if ((xcr0.eax & 0x00000006U) == 0x00000006U) {
        cpuInfo->addFeature(CpuInfo::kX86FeatureAVX);

        if (regs.ecx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureXOP);
        if (regs.ecx & 0x00004000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA3);
        if (regs.ecx & 0x00010000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA4);
        if (regs.ecx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureF16C);
      }
    }
  }

  // --------------------------------------------------------------------------
  // [CPUID EAX=0x7 ECX=0x0]
  // --------------------------------------------------------------------------

  // Detect new features if the processor supports CPUID-07.
  bool maybeMPX = false;

  if (maxId >= 0x7) {
    x86CallCpuId(&regs, 0x7);

    if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureFSGSBASE);
    if (regs.ebx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI);
    if (regs.ebx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureHLE);
    if (regs.ebx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI2);
    if (regs.ebx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVSBSTOSB_OPT);
    if (regs.ebx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureRTM);
    // MPX also needs an XCR0 check, performed below under leaf 0xD.
    if (regs.ebx & 0x00004000U) maybeMPX = true;
    if (regs.ebx & 0x00040000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDSEED);
    if (regs.ebx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureADX);
    if (regs.ebx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH_OPT);
    if (regs.ebx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSHA);
    if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHWT1);

    // Detect AVX2.
    if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) {
      if (regs.ebx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX2);
    }

    // Detect AVX-512+.
    if (regs.ebx & 0x00010000U) {
      // - XCR0[2:1] ==  11b
      //   XMM/YMM states need to be enabled by OS.
      // - XCR0[7:5] == 111b
      //   Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 need to be enabled by OS.
      if ((xcr0.eax & 0x00000076U) == 0x00000076U) {
        cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512F);

        if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512DQ);
        if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512PF);
        if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512ER);
        if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512CD);
        if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512BW);
        if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512VL);
      }
    }
  }

  // --------------------------------------------------------------------------
  // [CPUID EAX=0xD, ECX=0x0]
  // --------------------------------------------------------------------------

  if (maxId >= 0xD && maybeMPX) {
    x86CallCpuId(&regs, 0xD);

    // Both CPUID result and XCR0 has to be enabled to have support for MPX.
    if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U) {
      cpuInfo->addFeature(CpuInfo::kX86FeatureMPX);
    }
  }

  // --------------------------------------------------------------------------
  // [CPUID EAX=0x80000000...maxId]
  // --------------------------------------------------------------------------

  // Several CPUID calls are required to get the whole brand string. It's easy
  // to copy one DWORD at a time instead of performing a byte copy.
  uint32_t* brand = reinterpret_cast<uint32_t*>(cpuInfo->_brandString);

  i = maxId = 0x80000000U;
  do {
    x86CallCpuId(&regs, i);
    switch (i) {
      case 0x80000000U:
        maxId = Utils::iMin<uint32_t>(regs.eax, 0x80000004);
        break;

      case 0x80000001U:
        if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureLAHF_SAHF);
        if (regs.ecx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureLZCNT);
        if (regs.ecx & 0x00000040U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4A);
        if (regs.ecx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSSE);
        if (regs.ecx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCH);

        if (regs.edx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureNX);
        if (regs.edx & 0x00200000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR_OPT);
        if (regs.edx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX2);
        if (regs.edx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSCP);
        if (regs.edx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW2)
                                           .addFeature(CpuInfo::kX86FeatureMMX2);
        if (regs.edx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW);
        break;

      case 0x80000002U:
      case 0x80000003U:
      case 0x80000004U:
        *brand++ = regs.eax;
        *brand++ = regs.ebx;
        *brand++ = regs.ecx;
        *brand++ = regs.edx;
        break;

      default:
        // Stop the loop, additional features can be detected in the future.
        i = maxId;
        break;
    }
  } while (i++ < maxId);

  // Simplify CPU brand string by removing unnecessary spaces.
  x86SimplifyBrandString(cpuInfo->_brandString);
}
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
// ============================================================================
// [asmjit::CpuInfo - Detect - HWThreadsCount]
// ============================================================================
//! \internal
//!
//! Detect the number of hardware threads (logical processors) available.
//! Always returns at least 1.
static uint32_t cpuDetectHWThreadsCount() noexcept {
#if ASMJIT_OS_WINDOWS
  SYSTEM_INFO info;
  ::GetSystemInfo(&info);
  return info.dwNumberOfProcessors;
#elif ASMJIT_OS_POSIX && defined(_SC_NPROCESSORS_ONLN)
  // It seems that sysconf returns the number of "logical" processors on both
  // mac and linux. So we get the number of "online logical" processors.
  long res = ::sysconf(_SC_NPROCESSORS_ONLN);

  // sysconf() returns -1 on failure; the original checked `== -1` and then
  // `<= 0` separately - the first check is redundant, a single guard covers
  // every non-positive result.
  if (res <= 0)
    return 1;
  return static_cast<uint32_t>(res);
#else
  return 1;
#endif
}
// ============================================================================
// [asmjit::CpuInfo - Detect]
// ============================================================================
// Detect the host CPU: clears all fields, fills in the hardware thread
// count, then runs the architecture-specific feature detection selected
// by the compile-time target.
void CpuInfo::detect() noexcept {
  reset();

  // Detect the number of hardware threads available.
  _hwThreadsCount = cpuDetectHWThreadsCount();

#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
  armDetectCpuInfo(this);
#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64

#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
  x86DetectCpuInfo(this);
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
}
// ============================================================================
// [asmjit::CpuInfo - GetHost]
// ============================================================================
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
struct AutoX86CpuInfo : public X86CpuInfo {
ASMJIT_INLINE AutoX86CpuInfo() : X86CpuInfo() {
X86CpuUtil::detect(this);
}
struct HostCpuInfo : public CpuInfo {
ASMJIT_INLINE HostCpuInfo() noexcept : CpuInfo() { detect(); }
};
#else
#error "[asmjit] Unsupported CPU."
#endif
const CpuInfo* CpuInfo::getHost() {
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
static AutoX86CpuInfo cpuInfo;
#else
#error "[asmjit] Unsupported CPU."
#endif
return &cpuInfo;
const CpuInfo& CpuInfo::getHost() noexcept {
static HostCpuInfo host;
return host;
}
} // asmjit namespace

View File

@@ -19,107 +19,357 @@ namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::CpuVendor]
// ============================================================================
//! Cpu vendor ID.
//!
//! Vendor IDs are specific to AsmJit library. During the library initialization
//! AsmJit checks host CPU and tries to identify the vendor based on the CPUID
//! calls. Some manufacturers changed their vendor strings and AsmJit is aware
//! of that - it checks multiple combinations and decides which vendor ID should
//! be used.
ASMJIT_ENUM(CpuVendor) {
//! No/Unknown vendor.
kCpuVendorNone = 0,
//! Intel vendor.
kCpuVendorIntel = 1,
//! AMD vendor.
kCpuVendorAMD = 2,
//! VIA vendor.
kCpuVendorVIA = 3
};
// ============================================================================
// [asmjit::CpuInfo]
// ============================================================================
//! Base cpu information.
//! CPU information.
struct CpuInfo {
ASMJIT_NO_COPY(CpuInfo)
// --------------------------------------------------------------------------
// [Vendor]
// --------------------------------------------------------------------------
//! CPU vendor ID.
ASMJIT_ENUM(Vendor) {
//! Generic or unknown.
kVendorNone = 0,
//! Intel vendor.
kVendorIntel = 1,
//! AMD vendor.
kVendorAMD = 2,
//! VIA vendor.
kVendorVIA = 3
};
// --------------------------------------------------------------------------
// [ArmFeatures]
// --------------------------------------------------------------------------
//! ARM/ARM64 CPU features.
ASMJIT_ENUM(ArmFeatures) {
//! ARMv6 instruction set.
kArmFeatureV6,
//! ARMv7 instruction set.
kArmFeatureV7,
//! ARMv8 instruction set.
kArmFeatureV8,
//! CPU provides THUMB v1 instruction set (ARM only).
kArmFeatureTHUMB,
//! CPU provides THUMB v2 instruction set (ARM only).
kArmFeatureTHUMB2,
//! CPU provides VFPv2 instruction set.
kArmFeatureVFP2,
//! CPU provides VFPv3 instruction set.
kArmFeatureVFP3,
//! CPU provides VFPv4 instruction set.
kArmFeatureVFP4,
//! CPU provides 32 VFP-D (64-bit) registers.
kArmFeatureVFP_D32,
//! CPU provides NEON instruction set.
kArmFeatureNEON,
//! CPU provides DSP extensions.
kArmFeatureDSP,
//! CPU provides hardware support for SDIV and UDIV.
kArmFeatureIDIV,
//! CPU provides AES instructions (ARM64 only).
kArmFeatureAES,
//! CPU provides CRC32 instructions (ARM64 only).
kArmFeatureCRC32,
//! CPU provides PMULL instructions (ARM64 only).
kArmFeaturePMULL,
//! CPU provides SHA1 instructions (ARM64 only).
kArmFeatureSHA1,
//! CPU provides SHA256 instructions (ARM64 only).
kArmFeatureSHA256,
//! CPU provides 64-bit load/store atomics (ARM64 only).
kArmFeatureAtomics64,
//! Count of ARM/ARM64 CPU features.
kArmFeaturesCount
};
// --------------------------------------------------------------------------
// [X86Features]
// --------------------------------------------------------------------------
//! X86/X64 CPU features.
ASMJIT_ENUM(X86Features) {
//! Cpu has Not-Execute-Bit.
kX86FeatureNX = 0,
//! Cpu has multithreading.
kX86FeatureMT,
//! Cpu has RDTSC.
kX86FeatureRDTSC,
//! Cpu has RDTSCP.
kX86FeatureRDTSCP,
//! Cpu has CMOV.
kX86FeatureCMOV,
//! Cpu has CMPXCHG8B.
kX86FeatureCMPXCHG8B,
//! Cpu has CMPXCHG16B (X64).
kX86FeatureCMPXCHG16B,
//! Cpu has CLFLUSH.
kX86FeatureCLFLUSH,
//! Cpu has CLFLUSH (Optimized).
kX86FeatureCLFLUSH_OPT,
//! Cpu has PREFETCH.
kX86FeaturePREFETCH,
//! Cpu has PREFETCHWT1.
kX86FeaturePREFETCHWT1,
//! Cpu has LAHF/SAHF.
kX86FeatureLAHF_SAHF,
//! Cpu has FXSAVE/FXRSTOR.
kX86FeatureFXSR,
//! Cpu has FXSAVE/FXRSTOR (Optimized).
kX86FeatureFXSR_OPT,
//! Cpu has MMX.
kX86FeatureMMX,
//! Cpu has extended MMX.
kX86FeatureMMX2,
//! Cpu has 3dNow!
kX86Feature3DNOW,
//! Cpu has enchanced 3dNow!
kX86Feature3DNOW2,
//! Cpu has SSE.
kX86FeatureSSE,
//! Cpu has SSE2.
kX86FeatureSSE2,
//! Cpu has SSE3.
kX86FeatureSSE3,
//! Cpu has SSSE3.
kX86FeatureSSSE3,
//! Cpu has SSE4.A.
kX86FeatureSSE4A,
//! Cpu has SSE4.1.
kX86FeatureSSE4_1,
//! Cpu has SSE4.2.
kX86FeatureSSE4_2,
//! Cpu has Misaligned SSE (MSSE).
kX86FeatureMSSE,
//! Cpu has MONITOR and MWAIT.
kX86FeatureMONITOR,
//! Cpu has MOVBE.
kX86FeatureMOVBE,
//! Cpu has POPCNT.
kX86FeaturePOPCNT,
//! Cpu has LZCNT.
kX86FeatureLZCNT,
//! Cpu has AESNI.
kX86FeatureAESNI,
//! Cpu has PCLMULQDQ.
kX86FeaturePCLMULQDQ,
//! Cpu has RDRAND.
kX86FeatureRDRAND,
//! Cpu has RDSEED.
kX86FeatureRDSEED,
//! Cpu has SHA-1 and SHA-256.
kX86FeatureSHA,
//! Cpu has XSAVE support - XSAVE/XRSTOR, XSETBV/XGETBV, and XCR0.
kX86FeatureXSAVE,
//! OS has enabled XSAVE, you can call XGETBV to get value of XCR0.
kX86FeatureXSAVE_OS,
//! Cpu has AVX.
kX86FeatureAVX,
//! Cpu has AVX2.
kX86FeatureAVX2,
//! Cpu has F16C.
kX86FeatureF16C,
//! Cpu has FMA3.
kX86FeatureFMA3,
//! Cpu has FMA4.
kX86FeatureFMA4,
//! Cpu has XOP.
kX86FeatureXOP,
//! Cpu has BMI.
kX86FeatureBMI,
//! Cpu has BMI2.
kX86FeatureBMI2,
//! Cpu has HLE.
kX86FeatureHLE,
//! Cpu has RTM.
kX86FeatureRTM,
//! Cpu has ADX.
kX86FeatureADX,
//! Cpu has MPX (Memory Protection Extensions).
kX86FeatureMPX,
//! Cpu has FSGSBASE.
kX86FeatureFSGSBASE,
//! Cpu has optimized REP MOVSB/STOSB.
kX86FeatureMOVSBSTOSB_OPT,
//! Cpu has AVX-512F (Foundation).
kX86FeatureAVX512F,
//! Cpu has AVX-512CD (Conflict Detection).
kX86FeatureAVX512CD,
//! Cpu has AVX-512PF (Prefetch Instructions).
kX86FeatureAVX512PF,
//! Cpu has AVX-512ER (Exponential and Reciprocal Instructions).
kX86FeatureAVX512ER,
//! Cpu has AVX-512DQ (DWord/QWord).
kX86FeatureAVX512DQ,
//! Cpu has AVX-512BW (Byte/Word).
kX86FeatureAVX512BW,
  //! Cpu has AVX-512VL (Vector Length Extensions).
kX86FeatureAVX512VL,
//! Count of X86/X64 CPU features.
kX86FeaturesCount
};
// --------------------------------------------------------------------------
// [Other]
// --------------------------------------------------------------------------
//! \internal
enum {
kFeaturesPerUInt32 = static_cast<int>(sizeof(uint32_t)) * 8
};
// --------------------------------------------------------------------------
// [ArmInfo]
// --------------------------------------------------------------------------
struct ArmData {
};
// --------------------------------------------------------------------------
// [X86Info]
// --------------------------------------------------------------------------
struct X86Data {
//! Processor type.
uint32_t _processorType;
//! Brand index.
uint32_t _brandIndex;
//! Flush cache line size in bytes.
uint32_t _flushCacheLineSize;
//! Maximum number of addressable IDs for logical processors.
uint32_t _maxLogicalProcessors;
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE CpuInfo(uint32_t size) : _size(size) {}
ASMJIT_INLINE CpuInfo() noexcept { reset(); }
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
ASMJIT_INLINE void reset() noexcept {
::memset(this, 0, sizeof(CpuInfo));
}
// --------------------------------------------------------------------------
// [Detect]
// --------------------------------------------------------------------------
ASMJIT_API void detect() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get CPU architecture, see \Arch.
ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; }
//! Set CPU architecture, see \Arch.
ASMJIT_INLINE void setArch(uint32_t arch) noexcept { _arch = static_cast<uint8_t>(arch); }
//! Get CPU vendor string.
ASMJIT_INLINE const char* getVendorString() const { return _vendorString; }
ASMJIT_INLINE const char* getVendorString() const noexcept { return _vendorString; }
//! Get CPU brand string.
ASMJIT_INLINE const char* getBrandString() const { return _brandString; }
ASMJIT_INLINE const char* getBrandString() const noexcept { return _brandString; }
//! Get CPU vendor ID.
ASMJIT_INLINE uint32_t getVendorId() const { return _vendorId; }
ASMJIT_INLINE uint32_t getVendorId() const noexcept { return _vendorId; }
//! Get CPU family ID.
ASMJIT_INLINE uint32_t getFamily() const { return _family; }
ASMJIT_INLINE uint32_t getFamily() const noexcept { return _family; }
//! Get CPU model ID.
ASMJIT_INLINE uint32_t getModel() const { return _model; }
ASMJIT_INLINE uint32_t getModel() const noexcept { return _model; }
//! Get CPU stepping.
ASMJIT_INLINE uint32_t getStepping() const { return _stepping; }
ASMJIT_INLINE uint32_t getStepping() const noexcept { return _stepping; }
//! Get number of hardware threads available.
ASMJIT_INLINE uint32_t getHwThreadsCount() const { return _hwThreadsCount; }
ASMJIT_INLINE uint32_t getHwThreadsCount() const noexcept {
return _hwThreadsCount;
}
//! Get whether CPU has a `feature`.
ASMJIT_INLINE bool hasFeature(uint32_t feature) const {
ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept {
ASMJIT_ASSERT(feature < sizeof(_features) * 8);
return static_cast<bool>(
(_features[feature / kFeaturesPerUInt32] >> (feature % kFeaturesPerUInt32)) & 0x1);
uint32_t pos = feature / kFeaturesPerUInt32;
uint32_t bit = feature % kFeaturesPerUInt32;
return static_cast<bool>((_features[pos] >> bit) & 0x1);
}
//! Add a CPU `feature`.
ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) {
ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) noexcept {
ASMJIT_ASSERT(feature < sizeof(_features) * 8);
_features[feature / kFeaturesPerUInt32] |= (1U << (feature % kFeaturesPerUInt32));
uint32_t pos = feature / kFeaturesPerUInt32;
uint32_t bit = feature % kFeaturesPerUInt32;
_features[pos] |= static_cast<uint32_t>(1) << bit;
return *this;
}
// --------------------------------------------------------------------------
// [Accessors - ARM]
// --------------------------------------------------------------------------
// --------------------------------------------------------------------------
// [Accessors - X86]
// --------------------------------------------------------------------------
//! Get processor type.
ASMJIT_INLINE uint32_t getX86ProcessorType() const noexcept {
return _x86Data._processorType;
}
//! Get brand index.
ASMJIT_INLINE uint32_t getX86BrandIndex() const noexcept {
return _x86Data._brandIndex;
}
//! Get flush cache line size.
ASMJIT_INLINE uint32_t getX86FlushCacheLineSize() const noexcept {
return _x86Data._flushCacheLineSize;
}
//! Get maximum logical processors count.
ASMJIT_INLINE uint32_t getX86MaxLogicalProcessors() const noexcept {
return _x86Data._maxLogicalProcessors;
}
// --------------------------------------------------------------------------
// [Statics]
// --------------------------------------------------------------------------
//! Detect the number of hardware threads.
static ASMJIT_API uint32_t detectHwThreadsCount();
//! Get host cpu.
static ASMJIT_API const CpuInfo* getHost();
static ASMJIT_API const CpuInfo& getHost() noexcept;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Size of the structure in bytes.
uint32_t _size;
//! Cpu short vendor string.
//! Cpu vendor string.
char _vendorString[16];
//! Cpu long vendor string (brand).
//! Cpu brand string.
char _brandString[64];
//! CPU architecture, see \ref Arch.
uint8_t _arch;
//! \internal
uint8_t _reserved[3];
//! Cpu vendor id, see \ref CpuVendor.
uint32_t _vendorId;
//! Cpu family ID.
@@ -134,6 +384,12 @@ struct CpuInfo {
//! Cpu features bitfield.
uint32_t _features[4];
// Architecture specific data.
union {
ArmData _armData;
X86Data _x86Data;
};
};
//! \}

View File

@@ -19,7 +19,7 @@ namespace asmjit {
// [asmjit::DebugUtils]
// ============================================================================
#if !defined(ASMJIT_DISABLE_NAMES)
#if !defined(ASMJIT_DISABLE_TEXT)
static const char errorMessages[] = {
"Ok\0"
"No heap memory\0"
@@ -39,7 +39,7 @@ static const char errorMessages[] = {
"Unknown error\0"
};
static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) {
static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) noexcept {
uint32_t i = 0;
if (id > maxId)
@@ -55,10 +55,10 @@ static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId)
return p;
}
#endif // ASMJIT_DISABLE_NAMES
#endif // ASMJIT_DISABLE_TEXT
const char* DebugUtils::errorAsString(Error e) {
#if !defined(ASMJIT_DISABLE_NAMES)
const char* DebugUtils::errorAsString(Error e) noexcept {
#if !defined(ASMJIT_DISABLE_TEXT)
return findPackedString(errorMessages, e, kErrorCount);
#else
static const char noMessage[] = "";
@@ -66,7 +66,7 @@ const char* DebugUtils::errorAsString(Error e) {
#endif
}
void DebugUtils::debugOutput(const char* str) {
void DebugUtils::debugOutput(const char* str) noexcept {
#if ASMJIT_OS_WINDOWS
::OutputDebugStringA(str);
#else
@@ -74,7 +74,7 @@ void DebugUtils::debugOutput(const char* str) {
#endif
}
void DebugUtils::assertionFailed(const char* file, int line, const char* msg) {
void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
char str[1024];
snprintf(str, 1024,

View File

@@ -89,27 +89,25 @@ ASMJIT_ENUM(ArchId) {
kArchX86 = 1,
//! X64 architecture (64-bit), also called AMD64.
kArchX64 = 2,
//! X32 architecture (64-bit with 32-bit pointers) (NOT USED ATM).
kArchX32 = 3,
//! Arm architecture (32-bit).
kArchArm = 4,
kArchArm32 = 4,
//! Arm64 architecture (64-bit).
kArchArm64 = 5,
#if ASMJIT_ARCH_X86
kArchHost = kArchX86,
kArchHost = kArchX86
#elif ASMJIT_ARCH_X64
kArchHost = kArchX64,
#elif ASMJIT_ARCH_ARM
kArchHost = kArchArm,
kArchHost = kArchX64
#elif ASMJIT_ARCH_ARM32
kArchHost = kArchArm32
#elif ASMJIT_ARCH_ARM64
kArchHost = kArchArm64,
kArchHost = kArchArm64
#else
# error "[asmjit] Unsupported host architecture."
#endif
//! Whether the host is 64-bit.
kArchHost64Bit = sizeof(intptr_t) >= 8
};
// ============================================================================
@@ -184,8 +182,8 @@ ASMJIT_ENUM(ErrorCode) {
//! Illegal (unencodable) displacement used.
//!
//! X86/X64
//! -------
//! X86/X64 Specific
//! ----------------
//!
//! Short form of jump instruction has been used, but the displacement is out
//! of bounds.
@@ -219,14 +217,14 @@ static const _NoInit NoInit = {};
namespace DebugUtils {
//! Get a printable version of AsmJit `Error` code.
ASMJIT_API const char* errorAsString(Error code);
ASMJIT_API const char* errorAsString(Error code) noexcept;
//! \addtogroup asmjit_base
//! \{
//! Called in debug build to output a debugging message caused by assertion
//! failure or tracing.
ASMJIT_API void debugOutput(const char* str);
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called in debug build on assertion failure.
//!
@@ -237,7 +235,7 @@ ASMJIT_API void debugOutput(const char* str);
//! If you have problems with assertions put a breakpoint at assertionFailed()
//! function (asmjit/base/globals.cpp) and check the call stack to locate the
//! failing code.
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg);
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
//! \}
@@ -297,7 +295,7 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons
//! cross-platform software with various compiler support, consider using
//! `asmjit_cast<>` instead of `reinterpret_cast<>`.
template<typename T, typename Z>
static ASMJIT_INLINE T asmjit_cast(Z* p) { return (T)p; }
static ASMJIT_INLINE T asmjit_cast(Z* p) noexcept { return (T)p; }
//! \}

File diff suppressed because it is too large Load Diff

View File

@@ -28,7 +28,7 @@ namespace asmjit {
// [asmjit::LogUtil]
// ============================================================================
bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) {
bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept {
size_t currentLen = sb.getLength();
size_t commentLen = comment ? Utils::strLen(comment, kMaxCommentLength) : 0;
@@ -82,18 +82,18 @@ bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLe
// [asmjit::Logger - Construction / Destruction]
// ============================================================================
Logger::Logger() {
Logger::Logger() noexcept {
_options = 0;
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
}
Logger::~Logger() {}
Logger::~Logger() noexcept {}
// ============================================================================
// [asmjit::Logger - Logging]
// ============================================================================
void Logger::logFormat(uint32_t style, const char* fmt, ...) {
void Logger::logFormat(uint32_t style, const char* fmt, ...) noexcept {
char buf[1024];
size_t len;
@@ -108,7 +108,7 @@ void Logger::logFormat(uint32_t style, const char* fmt, ...) {
logString(style, buf, len);
}
void Logger::logBinary(uint32_t style, const void* data, size_t size) {
void Logger::logBinary(uint32_t style, const void* data, size_t size) noexcept {
static const char prefix[] = ".data ";
static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' };
@@ -138,27 +138,11 @@ void Logger::logBinary(uint32_t style, const void* data, size_t size) {
}
}
// ============================================================================
// [asmjit::Logger - LogBinary]
// ============================================================================
void Logger::setOption(uint32_t id, bool value) {
if (id >= kLoggerOptionCount)
return;
uint32_t mask = 1 << id;
if (value)
_options |= mask;
else
_options &= ~mask;
}
// ============================================================================
// [asmjit::Logger - Indentation]
// ============================================================================
void Logger::setIndentation(const char* indentation) {
void Logger::setIndentation(const char* indentation) noexcept {
::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation));
if (!indentation)
return;
@@ -171,17 +155,14 @@ void Logger::setIndentation(const char* indentation) {
// [asmjit::FileLogger - Construction / Destruction]
// ============================================================================
FileLogger::FileLogger(FILE* stream) : _stream(nullptr) {
setStream(stream);
}
FileLogger::~FileLogger() {}
FileLogger::FileLogger(FILE* stream) noexcept : _stream(nullptr) { setStream(stream); }
FileLogger::~FileLogger() noexcept {}
// ============================================================================
// [asmjit::FileLogger - Logging]
// ============================================================================
void FileLogger::logString(uint32_t style, const char* buf, size_t len) {
void FileLogger::logString(uint32_t style, const char* buf, size_t len) noexcept {
if (!_stream)
return;
@@ -195,14 +176,14 @@ void FileLogger::logString(uint32_t style, const char* buf, size_t len) {
// [asmjit::StringLogger - Construction / Destruction]
// ============================================================================
StringLogger::StringLogger() {}
StringLogger::~StringLogger() {}
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
// ============================================================================
// [asmjit::StringLogger - Logging]
// ============================================================================
void StringLogger::logString(uint32_t style, const char* buf, size_t len) {
void StringLogger::logString(uint32_t style, const char* buf, size_t len) noexcept {
_stringBuilder.appendString(buf, len);
}

View File

@@ -9,7 +9,6 @@
#define _ASMJIT_BASE_LOGGER_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_LOGGER)
// [Dependencies - AsmJit]
#include "../base/containers.h"
@@ -25,38 +24,7 @@ namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::LoggerOption]
// ============================================================================
//! Logger options.
ASMJIT_ENUM(LoggerOption) {
//! Whether to output instructions also in binary form.
kLoggerOptionBinaryForm = 0,
//! Whether to output immediates as hexadecimal numbers.
kLoggerOptionHexImmediate = 1,
//! Whether to output displacements as hexadecimal numbers.
kLoggerOptionHexDisplacement = 2,
//! Count of logger options.
kLoggerOptionCount = 3
};
// ============================================================================
// [asmjit::LoggerStyle]
// ============================================================================
//! Logger style.
ASMJIT_ENUM(LoggerStyle) {
kLoggerStyleDefault = 0,
kLoggerStyleDirective = 1,
kLoggerStyleLabel = 2,
kLoggerStyleData = 3,
kLoggerStyleComment = 4,
kLoggerStyleCount = 5
};
#if !defined(ASMJIT_DISABLE_LOGGER)
// ============================================================================
// [asmjit::LogUtil]
@@ -73,7 +41,9 @@ struct LogUtil {
kMaxBinaryLength = 26
};
static bool formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment);
static bool formatLine(
StringBuilder& sb,
const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept;
};
#endif // ASMJIT_EXPORTS
@@ -92,59 +62,88 @@ struct LogUtil {
struct ASMJIT_VIRTAPI Logger {
ASMJIT_NO_COPY(Logger)
// --------------------------------------------------------------------------
// [Options]
// --------------------------------------------------------------------------
//! Logger options.
ASMJIT_ENUM(Options) {
//! Whether to output instructions also in binary form.
kOptionBinaryForm = 0,
//! Whether to output immediates as hexadecimal numbers.
kOptionHexImmediate = 1,
//! Whether to output displacements as hexadecimal numbers.
kOptionHexDisplacement = 2,
//! Count of logger options.
kOptionCount = 3
};
// --------------------------------------------------------------------------
// [Style]
// --------------------------------------------------------------------------
//! Logger style.
ASMJIT_ENUM(Style) {
kStyleDefault = 0,
kStyleDirective = 1,
kStyleLabel = 2,
kStyleData = 3,
kStyleComment = 4,
kStyleCount = 5
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `Logger` instance.
ASMJIT_API Logger();
ASMJIT_API Logger() noexcept;
//! Destroy the `Logger` instance.
ASMJIT_API virtual ~Logger();
ASMJIT_API virtual ~Logger() noexcept;
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
//! Log output.
virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) = 0;
virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept = 0;
//! Log formatter message (like sprintf) sending output to `logString()` method.
ASMJIT_API void logFormat(uint32_t style, const char* fmt, ...);
ASMJIT_API void logFormat(uint32_t style, const char* fmt, ...) noexcept;
//! Log binary data.
ASMJIT_API void logBinary(uint32_t style, const void* data, size_t size);
ASMJIT_API void logBinary(uint32_t style, const void* data, size_t size) noexcept;
// --------------------------------------------------------------------------
// [Options]
// --------------------------------------------------------------------------
//! Get all logger options as a single integer.
ASMJIT_INLINE uint32_t getOptions() const {
return _options;
}
ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; }
//! Get the given logger option.
ASMJIT_INLINE bool getOption(uint32_t id) const {
ASMJIT_ASSERT(id < kLoggerOptionCount);
return static_cast<bool>((_options >> id) & 0x1);
ASMJIT_INLINE bool hasOption(uint32_t option) const noexcept {
return (_options & option) != 0;
}
//! Set the given logger option.
ASMJIT_API void setOption(uint32_t id, bool value);
ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; }
ASMJIT_INLINE void clearOptions(uint32_t options) noexcept { _options &= ~options; }
// --------------------------------------------------------------------------
// [Indentation]
// --------------------------------------------------------------------------
//! Get indentation.
ASMJIT_INLINE const char* getIndentation() const {
ASMJIT_INLINE const char* getIndentation() const noexcept {
return _indentation;
}
//! Set indentation.
ASMJIT_API void setIndentation(const char* indentation);
ASMJIT_API void setIndentation(const char* indentation) noexcept;
//! Reset indentation.
ASMJIT_INLINE void resetIndentation() {
ASMJIT_INLINE void resetIndentation() noexcept {
setIndentation(nullptr);
}
@@ -172,10 +171,10 @@ struct ASMJIT_VIRTAPI FileLogger : public Logger {
// --------------------------------------------------------------------------
//! Create a new `FileLogger` that logs to a `FILE` stream.
ASMJIT_API FileLogger(FILE* stream = nullptr);
ASMJIT_API FileLogger(FILE* stream = nullptr) noexcept;
//! Destroy the `FileLogger`.
ASMJIT_API virtual ~FileLogger();
ASMJIT_API virtual ~FileLogger() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
@@ -184,13 +183,13 @@ struct ASMJIT_VIRTAPI FileLogger : public Logger {
//! Get `FILE*` stream.
//!
//! \note Return value can be `nullptr`.
ASMJIT_INLINE FILE* getStream() const {
ASMJIT_INLINE FILE* getStream() const noexcept {
return _stream;
}
//! Set `FILE*` stream, can be set to `nullptr` to disable logging, although
//! the `CodeGen` will still call `logString` even if there is no stream.
ASMJIT_INLINE void setStream(FILE* stream) {
//! the `ExternalTool` will still call `logString` even if there is no stream.
ASMJIT_INLINE void setStream(FILE* stream) noexcept {
_stream = stream;
}
@@ -198,7 +197,7 @@ struct ASMJIT_VIRTAPI FileLogger : public Logger {
// [Logging]
// --------------------------------------------------------------------------
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex);
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept;
// --------------------------------------------------------------------------
// [Members]
@@ -221,10 +220,10 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger {
// --------------------------------------------------------------------------
//! Create new `StringLogger`.
ASMJIT_API StringLogger();
ASMJIT_API StringLogger() noexcept;
//! Destroy the `StringLogger`.
ASMJIT_API virtual ~StringLogger();
ASMJIT_API virtual ~StringLogger() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
@@ -233,17 +232,17 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger {
//! Get `char*` pointer which represents the resulting string.
//!
//! The pointer is owned by `StringLogger`, it can't be modified or freed.
ASMJIT_INLINE const char* getString() const {
ASMJIT_INLINE const char* getString() const noexcept {
return _stringBuilder.getData();
}
//! Get the length of the string returned by `getString()`.
ASMJIT_INLINE size_t getLength() const {
ASMJIT_INLINE size_t getLength() const noexcept {
return _stringBuilder.getLength();
}
//! Clear the resulting string.
ASMJIT_INLINE void clearString() {
ASMJIT_INLINE void clearString() noexcept {
_stringBuilder.clear();
}
@@ -251,7 +250,7 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger {
// [Logging]
// --------------------------------------------------------------------------
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex);
ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept;
// --------------------------------------------------------------------------
// [Members]
@@ -260,6 +259,9 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger {
//! Output.
StringBuilder _stringBuilder;
};
#else
struct Logger;
#endif // !ASMJIT_DISABLE_LOGGER
//! \}
@@ -269,5 +271,4 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger {
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_LOGGER
#endif // _ASMJIT_BASE_LOGGER_H

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,132 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Dependencies - AsmJit]
#include "../base/podvector.h"
#include "../base/utils.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::PodVectorBase - NullData]
// ============================================================================
const PodVectorBase::Data PodVectorBase::_nullData = { 0, 0 };
//! \internal
//!
//! Get whether `d` is the inline (static) storage that immediately follows
//! the `self` object in memory (used by `PodVectorTmp<T, N>`).
static ASMJIT_INLINE bool isDataStatic(PodVectorBase* self, PodVectorBase::Data* d) noexcept {
  const void* staticStorage = static_cast<const void*>(self + 1);
  return staticStorage == static_cast<const void*>(d);
}
// ============================================================================
// [asmjit::PodVectorBase - Reset]
// ============================================================================
//! Clear vector data and free internal buffer.
void PodVectorBase::reset(bool releaseMemory) noexcept {
  Data* d = _d;

  // Nothing to do if the vector still points at the shared null data.
  if (d == &_nullData)
    return;

  // Keep the buffer (just clear it) when the caller doesn't want the memory
  // released, or when the buffer is the inline storage of `PodVectorTmp`,
  // which must never be freed.
  if (!releaseMemory || isDataStatic(this, d)) {
    d->length = 0;
    return;
  }

  ASMJIT_FREE(d);
  _d = const_cast<Data*>(&_nullData);
}
// ============================================================================
// [asmjit::PodVectorBase - Helpers]
// ============================================================================
//! Grow the internal buffer so it can hold at least `n` more items, each of
//! size `sizeOfT` bytes. Returns `kErrorOk` if the capacity is sufficient.
Error PodVectorBase::_grow(size_t n, size_t sizeOfT) noexcept {
  Data* d = _d;
  size_t length = d->length;

  // Guard against `length + n` overflowing `size_t`.
  if (IntTraits<size_t>::maxValue() - n < length)
    return kErrorNoHeapMemory;

  size_t needed = length + n;
  size_t capacity = d->capacity;

  if (capacity >= needed)
    return kErrorOk;

  // PodVector is used as a linear array for some data structures used by
  // AsmJit code generation. The purpose of this aggressive growing schema
  // is to minimize memory reallocations, because AsmJit code generation
  // classes live a short life and will be freed or reused soon.
  if (capacity < 32)
    capacity = 32;
  else if (capacity < 128)
    capacity = 128;
  else if (capacity < 512)
    capacity = 512;

  // Double until `threshold` bytes worth of items is reached, then grow
  // linearly by `threshold` to avoid wasting too much memory.
  size_t threshold = kMemAllocGrowMax / sizeOfT;
  while (capacity < needed)
    capacity = (capacity < threshold) ? capacity * 2 : capacity + threshold;

  return _reserve(capacity, sizeOfT);
}
//! Reallocate the internal buffer to hold at least `n` items, each of size
//! `sizeOfT` bytes. Existing content is preserved. Returns `kErrorOk` or
//! `kErrorNoHeapMemory`.
Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) noexcept {
  Data* d = _d;

  if (d->capacity >= n)
    return kErrorOk;

  // Guard against `sizeof(Data) + n * sizeOfT` overflowing `size_t`. The
  // previous `nBytes < n` check missed overflows where the wrapped result
  // was still >= n.
  if (ASMJIT_UNLIKELY(n > (IntTraits<size_t>::maxValue() - sizeof(Data)) / sizeOfT))
    return kErrorNoHeapMemory;

  size_t nBytes = sizeof(Data) + n * sizeOfT;

  if (d == &_nullData) {
    // First allocation - the vector used the shared null data until now.
    d = static_cast<Data*>(ASMJIT_ALLOC(nBytes));
    if (ASMJIT_UNLIKELY(d == nullptr))
      return kErrorNoHeapMemory;
    d->length = 0;
  }
  else if (isDataStatic(this, d)) {
    // The data is the inline storage of `PodVectorTmp<T, N>`; it can't be
    // reallocated, so allocate a new buffer and copy the old content to it.
    Data* oldD = d;

    d = static_cast<Data*>(ASMJIT_ALLOC(nBytes));
    if (ASMJIT_UNLIKELY(d == nullptr))
      return kErrorNoHeapMemory;

    // BUGFIX: the length must come from the OLD block (the new one is
    // uninitialized), and the payload must be copied to `d->getData()`,
    // not to `d` itself (which would overwrite the new `Data` header).
    size_t len = oldD->length;
    d->length = len;
    ::memcpy(d->getData(), oldD->getData(), len * sizeOfT);
  }
  else {
    // Dynamically allocated buffer - `realloc()` preserves the content.
    d = static_cast<Data*>(ASMJIT_REALLOC(d, nBytes));
    if (ASMJIT_UNLIKELY(d == nullptr))
      return kErrorNoHeapMemory;
  }

  d->capacity = n;
  _d = d;

  return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"

278
src/asmjit/base/podvector.h Normal file
View File

@@ -0,0 +1,278 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_BASE_PODVECTOR_H
#define _ASMJIT_BASE_PODVECTOR_H
// [Dependencies - AsmJit]
#include "../base/globals.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::PodVectorBase]
// ============================================================================
//! \internal
struct PodVectorBase {
  // --------------------------------------------------------------------------
  // [Data]
  // --------------------------------------------------------------------------

  //! \internal
  //!
  //! Header of the vector's buffer; the items are stored immediately after it.
  struct Data {
    //! Get data - the first item, stored right after this `Data` header.
    ASMJIT_INLINE void* getData() const noexcept {
      return static_cast<void*>(const_cast<Data*>(this + 1));
    }

    //! Capacity of the vector (in items, not bytes).
    size_t capacity;
    //! Length of the vector (in items, not bytes).
    size_t length;
  };

  //! Shared read-only data used by all empty vectors (avoids an allocation).
  static ASMJIT_API const Data _nullData;

  // --------------------------------------------------------------------------
  // [Construction / Destruction]
  // --------------------------------------------------------------------------

  //! Create a new instance of `PodVectorBase`.
  ASMJIT_INLINE PodVectorBase() noexcept : _d(const_cast<Data*>(&_nullData)) {}
  //! Destroy the `PodVectorBase` and its data.
  ASMJIT_INLINE ~PodVectorBase() noexcept { reset(true); }

protected:
  //! \internal
  //!
  //! Create a `PodVectorBase` using preallocated `d` (used by `PodVectorTmp`).
  explicit ASMJIT_INLINE PodVectorBase(Data* d) noexcept : _d(d) {}

  // --------------------------------------------------------------------------
  // [Reset]
  // --------------------------------------------------------------------------

public:
  //! Reset the vector data and set its `length` to zero.
  //!
  //! If `releaseMemory` is true the vector buffer will be released to the
  //! system.
  ASMJIT_API void reset(bool releaseMemory = false) noexcept;

  // --------------------------------------------------------------------------
  // [Grow / Reserve]
  // --------------------------------------------------------------------------

protected:
  //! \internal
  //!
  //! Grow the buffer to hold at least `n` more items of `sizeOfT` bytes each.
  ASMJIT_API Error _grow(size_t n, size_t sizeOfT) noexcept;
  //! \internal
  //!
  //! Reallocate the buffer to hold at least `n` items of `sizeOfT` bytes each.
  ASMJIT_API Error _reserve(size_t n, size_t sizeOfT) noexcept;

  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------

public:
  //! Vector data; never null, points to `_nullData` when the vector is empty.
  Data* _d;
};
// ============================================================================
// [asmjit::PodVector<T>]
// ============================================================================
//! Template used to store and manage array of POD data.
//!
//! This template has these adventages over other vector<> templates:
//! - Non-copyable (designed to be non-copyable, we want it)
//! - No copy-on-write (some implementations of stl can use it)
//! - Optimized for working only with POD types
//! - Uses ASMJIT_... memory management macros
template <typename T>
struct PodVector : PodVectorBase {
ASMJIT_NO_COPY(PodVector<T>)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a new instance of `PodVector<T>`.
ASMJIT_INLINE PodVector() noexcept {}
//! Destroy the `PodVector<T>` and its data.
ASMJIT_INLINE ~PodVector() noexcept {}
protected:
explicit ASMJIT_INLINE PodVector(Data* d) noexcept : PodVectorBase(d) {}
// --------------------------------------------------------------------------
// [Data]
// --------------------------------------------------------------------------
public:
//! Get whether the vector is empty.
ASMJIT_INLINE bool isEmpty() const noexcept { return _d->length == 0; }
//! Get length.
ASMJIT_INLINE size_t getLength() const noexcept { return _d->length; }
//! Get capacity.
ASMJIT_INLINE size_t getCapacity() const noexcept { return _d->capacity; }
//! Get data.
ASMJIT_INLINE T* getData() noexcept { return static_cast<T*>(_d->getData()); }
//! \overload
ASMJIT_INLINE const T* getData() const noexcept { return static_cast<const T*>(_d->getData()); }
// --------------------------------------------------------------------------
// [Grow / Reserve]
// --------------------------------------------------------------------------
//! Called to grow the buffer to fit at least `n` elements more.
ASMJIT_INLINE Error _grow(size_t n) noexcept { return PodVectorBase::_grow(n, sizeof(T)); }
//! Realloc internal array to fit at least `n` items.
ASMJIT_INLINE Error _reserve(size_t n) noexcept { return PodVectorBase::_reserve(n, sizeof(T)); }
// --------------------------------------------------------------------------
// [Ops]
// --------------------------------------------------------------------------
//! Prepend `item` to vector.
Error prepend(const T& item) noexcept {
Data* d = _d;
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
_d = d;
}
::memmove(static_cast<T*>(d->getData()) + 1, d->getData(), d->length * sizeof(T));
::memcpy(d->getData(), &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Insert an `item` at the `index`.
Error insert(size_t index, const T& item) noexcept {
Data* d = _d;
ASMJIT_ASSERT(index <= d->length);
if (d->length == d->capacity) {
ASMJIT_PROPAGATE_ERROR(_grow(1));
d = _d;
}
T* dst = static_cast<T*>(d->getData()) + index;
::memmove(dst + 1, dst, d->length - index);
::memcpy(dst, &item, sizeof(T));
d->length++;
return kErrorOk;
}
//! Add `item` to the end of the vector.
//!
//! Grows the storage if the vector is full. Returns `kErrorOk` on success
//! or an error propagated from `_grow()`.
Error append(const T& item) noexcept {
  Data* vd = _d;

  // Make sure there is room for one more element (may reallocate `_d`).
  if (vd->capacity == vd->length) {
    ASMJIT_PROPAGATE_ERROR(_grow(1));
    vd = _d;
  }

  T* dst = static_cast<T*>(vd->getData()) + vd->length;
  ::memcpy(dst, &item, sizeof(T));

  vd->length++;
  return kErrorOk;
}
//! Get the index of the first element equal to `val`, or `kInvalidIndex`
//! if the vector doesn't contain it (linear scan, uses `operator==`).
size_t indexOf(const T& val) const noexcept {
  const T* items = static_cast<const T*>(_d->getData());
  size_t count = _d->length;

  size_t i = 0;
  while (i < count) {
    if (items[i] == val)
      return i;
    i++;
  }

  return kInvalidIndex;
}
//! Remove item at index `i`.
//!
//! Shifts all elements after `i` left by one; `i` must be a valid index.
void removeAt(size_t i) noexcept {
  Data* d = _d;
  ASMJIT_ASSERT(i < d->length);

  T* data = static_cast<T*>(d->getData()) + i;
  d->length--;
  // Shift the tail left by one element. `memmove` takes a byte count, so
  // the remaining element count must be scaled by `sizeof(T)`; the original
  // omitted the scale and moved too few bytes when sizeof(T) > 1.
  ::memmove(data, data + 1, (d->length - i) * sizeof(T));
}
//! Swap this pod-vector with `other`.
//!
//! Only the internal `Data` pointers are exchanged; no elements are copied
//! or moved, so this is O(1) and cannot fail.
void swap(PodVector<T>& other) noexcept {
  // NOTE: `_d` is a `Data*`, not a `T*` - the original declared the
  // temporary as `T*`, which is a type mismatch and doesn't compile.
  Data* tmp = other._d;
  other._d = _d;
  _d = tmp;
}
//! Get a mutable reference to the item at index `i` (asserted in debug
//! builds to be within bounds; no bounds check in release builds).
ASMJIT_INLINE T& operator[](size_t i) noexcept {
  ASMJIT_ASSERT(i < getLength());
  return getData()[i];
}
//! Get a const reference to the item at index `i` (asserted in debug
//! builds to be within bounds; no bounds check in release builds).
ASMJIT_INLINE const T& operator[](size_t i) const noexcept {
  ASMJIT_ASSERT(i < getLength());
  return getData()[i];
}
};
// ============================================================================
// [asmjit::PodVectorTmp<T>]
// ============================================================================
//! A `PodVector<T>` that carries inline storage for the first `N` elements,
//! avoiding any heap allocation until the vector outgrows that capacity.
template<typename T, size_t N>
struct PodVectorTmp : public PodVector<T> {
  ASMJIT_NO_COPY(PodVectorTmp<T, N>)
  // --------------------------------------------------------------------------
  // [StaticData]
  // --------------------------------------------------------------------------
  //! `PodVectorBase::Data` header followed by raw storage for `N` elements.
  //!
  //! NOTE(review): the backing store is a plain `char` array, so it appears
  //! to rely on `Data`'s own alignment being sufficient for `T` - confirm
  //! for over-aligned element types.
  struct StaticData : public PodVectorBase::Data {
    char data[sizeof(T) * N];
  };
  // --------------------------------------------------------------------------
  // [Data]
  // --------------------------------------------------------------------------
  //! Create a new instance of `PodVectorTmp<T>` backed by the embedded
  //! `_staticData` buffer (capacity `N`, length 0, no heap allocation).
  ASMJIT_INLINE PodVectorTmp() noexcept : PodVector<T>(&_staticData) {
    _staticData.capacity = N;
    _staticData.length = 0;
  }
  //! Destroy the `PodVectorTmp<T>` and its data.
  ASMJIT_INLINE ~PodVectorTmp() noexcept {}
  // --------------------------------------------------------------------------
  // [Members]
  // --------------------------------------------------------------------------
  //! Inline storage used until the vector grows past `N` elements.
  StaticData _staticData;
};
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_BASE_PODVECTOR_H

View File

@@ -9,62 +9,56 @@
// [Dependencies - AsmJit]
#include "../base/assembler.h"
#include "../base/cpuinfo.h"
#include "../base/runtime.h"
// TODO: Rename this, or make call conv independent of CompilerFunc.
#include "../base/compilerfunc.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::Runtime - Construction / Destruction]
// [asmjit::Runtime - Utilities]
// ============================================================================
Runtime::Runtime() {
_sizeLimit = 0;
_baseAddress = kNoBaseAddress;
_runtimeType = kRuntimeTypeNone;
_allocType = kVMemAllocFreeable;
::memset(_reserved, 0, sizeof(_reserved));
}
Runtime::~Runtime() {}
// ============================================================================
// [asmjit::HostRuntime - Construction / Destruction]
// ============================================================================
HostRuntime::HostRuntime() { _runtimeType = kRuntimeTypeJit; }
HostRuntime::~HostRuntime() {}
// ============================================================================
// [asmjit::HostRuntime - Interface]
// ============================================================================
const CpuInfo* HostRuntime::getCpuInfo() {
return CpuInfo::getHost();
}
uint32_t HostRuntime::getStackAlignment() {
static ASMJIT_INLINE uint32_t hostStackAlignment() noexcept {
// By default a pointer-size stack alignment is assumed.
uint32_t alignment = sizeof(intptr_t);
// Modern Linux, APPLE and UNIX guarantees 16-byte stack alignment, but I'm
// ARM & ARM64
// -----------
//
// - 32-bit ARM requires stack to be aligned to 8 bytes.
// - 64-bit ARM requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
alignment = ASMJIT_ARCH_ARM32 ? 8 : 16;
#endif
// X86 & X64
// ---------
//
// - 32-bit X86 requires stack to be aligned to 4 bytes. Modern Linux, APPLE
// and UNIX guarantees 16-byte stack alignment even in 32-bit, but I'm
// not sure about all other UNIX operating systems, because 16-byte alignment
// is addition to an older specification.
#if (ASMJIT_ARCH_X64) || \
(ASMJIT_ARCH_X86 && (ASMJIT_OS_LINUX || ASMJIT_OS_BSD || ASMJIT_OS_MAC || ASMJIT_OS_ANDROID))
alignment = 16;
// - 64-bit X86 requires stack to be aligned to 16 bytes.
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
int modernOS = ASMJIT_OS_LINUX || // Linux & ANDROID.
ASMJIT_OS_MAC || // OSX and iOS.
ASMJIT_OS_BSD; // BSD variants.
alignment = ASMJIT_ARCH_X64 || modernOS ? 16 : 4;
#endif
return alignment;
}
void HostRuntime::flush(void* p, size_t size) {
static ASMJIT_INLINE void hostFlushInstructionCache(void* p, size_t size) noexcept {
// Only useful on non-x86 architectures.
#if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
# if ASMJIT_OS_WINDOWS
// Windows has built-in support in kernel32.dll.
// Windows has a built-in support in kernel32.dll.
::FlushInstructionCache(_memMgr.getProcessHandle(), p, size);
# endif // ASMJIT_OS_WINDOWS
#else
@@ -73,11 +67,51 @@ void HostRuntime::flush(void* p, size_t size) {
#endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64
}
// ============================================================================
// [asmjit::Runtime - Construction / Destruction]
// ============================================================================
Runtime::Runtime() noexcept
: _runtimeType(kTypeNone),
_allocType(kVMemAllocFreeable),
_cpuInfo(),
_stackAlignment(0),
_cdeclConv(kCallConvNone),
_stdCallConv(kCallConvNone),
_baseAddress(kNoBaseAddress),
_sizeLimit(0) {
::memset(_reserved, 0, sizeof(_reserved));
}
Runtime::~Runtime() noexcept {}
// ============================================================================
// [asmjit::HostRuntime - Construction / Destruction]
// ============================================================================
HostRuntime::HostRuntime() noexcept {
_runtimeType = kTypeJit;
_cpuInfo = CpuInfo::getHost();
_stackAlignment = hostStackAlignment();
_cdeclConv = kCallConvHostCDecl;
_stdCallConv = kCallConvHostStdCall;
}
HostRuntime::~HostRuntime() noexcept {}
// ============================================================================
// [asmjit::HostRuntime - Interface]
// ============================================================================
void HostRuntime::flush(void* p, size_t size) noexcept {
hostFlushInstructionCache(p, size);
}
// ============================================================================
// [asmjit::StaticRuntime - Construction / Destruction]
// ============================================================================
StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) {
StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) noexcept {
_sizeLimit = sizeLimit;
_baseAddress = static_cast<Ptr>((uintptr_t)baseAddress);
}
@@ -87,7 +121,7 @@ StaticRuntime::~StaticRuntime() {}
// [asmjit::StaticRuntime - Interface]
// ============================================================================
Error StaticRuntime::add(void** dst, Assembler* assembler) {
Error StaticRuntime::add(void** dst, Assembler* assembler) noexcept {
size_t codeSize = assembler->getCodeSize();
size_t sizeLimit = _sizeLimit;
@@ -123,7 +157,7 @@ Error StaticRuntime::add(void** dst, Assembler* assembler) {
return kErrorOk;
}
Error StaticRuntime::release(void* p) {
Error StaticRuntime::release(void* p) noexcept {
// There is nothing to release as `StaticRuntime` doesn't manage any memory.
ASMJIT_UNUSED(p);
return kErrorOk;
@@ -133,14 +167,14 @@ Error StaticRuntime::release(void* p) {
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
JitRuntime::JitRuntime() {}
JitRuntime::~JitRuntime() {}
JitRuntime::JitRuntime() noexcept {}
JitRuntime::~JitRuntime() noexcept {}
// ============================================================================
// [asmjit::JitRuntime - Interface]
// ============================================================================
Error JitRuntime::add(void** dst, Assembler* assembler) {
Error JitRuntime::add(void** dst, Assembler* assembler) noexcept {
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
*dst = nullptr;
@@ -155,17 +189,22 @@ Error JitRuntime::add(void** dst, Assembler* assembler) {
// Relocate the code and release the unused memory back to `VMemMgr`.
size_t relocSize = assembler->relocCode(p);
if (relocSize < codeSize) {
_memMgr.shrink(p, relocSize);
if (relocSize == 0) {
*dst = nullptr;
_memMgr.release(p);
return kErrorInvalidState;
}
if (relocSize < codeSize)
_memMgr.shrink(p, relocSize);
flush(p, relocSize);
*dst = p;
return kErrorOk;
}
Error JitRuntime::release(void* p) {
Error JitRuntime::release(void* p) noexcept {
return _memMgr.release(p);
}

View File

@@ -9,6 +9,7 @@
#define _ASMJIT_BASE_RUNTIME_H
// [Dependencies - AsmJit]
#include "../base/cpuinfo.h"
#include "../base/vmem.h"
// [Api-Begin]
@@ -26,16 +27,6 @@ struct CpuInfo;
//! \addtogroup asmjit_base
//! \{
// ============================================================================
// [asmjit::RuntimeType]
// ============================================================================
ASMJIT_ENUM(RuntimeType) {
kRuntimeTypeNone = 0,
kRuntimeTypeJit = 1,
kRuntimeTypeRemote = 2
};
// ============================================================================
// [asmjit::Runtime]
// ============================================================================
@@ -44,63 +35,96 @@ ASMJIT_ENUM(RuntimeType) {
struct ASMJIT_VIRTAPI Runtime {
ASMJIT_NO_COPY(Runtime)
// --------------------------------------------------------------------------
// [asmjit::RuntimeType]
// --------------------------------------------------------------------------
ASMJIT_ENUM(Type) {
kTypeNone = 0,
kTypeJit = 1,
kTypeRemote = 2
};
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Create a `Runtime` instance.
ASMJIT_API Runtime();
ASMJIT_API Runtime() noexcept;
//! Destroy the `Runtime` instance.
ASMJIT_API virtual ~Runtime();
ASMJIT_API virtual ~Runtime() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get runtime type.
ASMJIT_INLINE uint32_t getRuntimeType() const { return _runtimeType; }
//! Get the runtime type, see \ref Type.
ASMJIT_INLINE uint32_t getRuntimeType() const noexcept { return _runtimeType; }
//! Get stack alignment of the target.
ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; }
//! Get the CDECL calling convention conforming to the runtime's ABI.
//!
//! NOTE: This is a default calling convention used by the runtime's target.
ASMJIT_INLINE uint32_t getCdeclConv() const noexcept { return _cdeclConv; }
//! Get the STDCALL calling convention conforming to the runtime's ABI.
//!
//! NOTE: STDCALL calling convention is only used by 32-bit x86 target. On
//! all other targets it's mapped to CDECL and calling `getStdcallConv()` will
//! return the same as `getCdeclConv()`.
ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; }
//! Get CPU information.
ASMJIT_INLINE const CpuInfo& getCpuInfo() const noexcept { return _cpuInfo; }
//! Set CPU information.
ASMJIT_INLINE void setCpuInfo(const CpuInfo& ci) noexcept { _cpuInfo = ci; }
//! Get whether the runtime has a base address.
ASMJIT_INLINE bool hasBaseAddress() const { return _baseAddress != kNoBaseAddress; }
ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != kNoBaseAddress; }
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const { return _baseAddress; }
ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
//! Get CPU information.
virtual const CpuInfo* getCpuInfo() = 0;
//! Get stack alignment of target runtime.
virtual uint32_t getStackAlignment() = 0;
//! Allocate a memory needed for a code generated by `assembler` and
//! relocate it to the target location.
//!
//! The beginning of the memory allocated for the function is returned in
//! `dst`. Returns Status code as \ref ErrorCode, on failure `dst` is set to
//! `nullptr`.
virtual Error add(void** dst, Assembler* assembler) = 0;
virtual Error add(void** dst, Assembler* assembler) noexcept = 0;
//! Release memory allocated by `add`.
virtual Error release(void* p) = 0;
virtual Error release(void* p) noexcept = 0;
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Maximum size of the code that can be added to the runtime (0=unlimited).
size_t _sizeLimit;
//! Base address (-1 means no base address).
Ptr _baseAddress;
//! Type of the runtime.
uint8_t _runtimeType;
//! Type of the allocation.
uint8_t _allocType;
//! Runtime's stack alignment.
uint8_t _stackAlignment;
//! CDECL calling convention conforming to runtime ABI.
uint8_t _cdeclConv;
//! STDCALL calling convention conforming to runtime ABI.
uint8_t _stdCallConv;
//! \internal
uint8_t _reserved[sizeof(intptr_t) - 2];
uint8_t _reserved[3];
//! Runtime CPU information.
CpuInfo _cpuInfo;
//! Base address (-1 means no base address).
Ptr _baseAddress;
//! Maximum size of the code that can be added to the runtime (0=unlimited).
size_t _sizeLimit;
};
// ============================================================================
@@ -116,17 +140,14 @@ struct ASMJIT_VIRTAPI HostRuntime : public Runtime {
// --------------------------------------------------------------------------
//! Create a `HostRuntime` instance.
ASMJIT_API HostRuntime();
ASMJIT_API HostRuntime() noexcept;
//! Destroy the `HostRuntime` instance.
ASMJIT_API virtual ~HostRuntime();
ASMJIT_API virtual ~HostRuntime() noexcept;
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual const CpuInfo* getCpuInfo();
ASMJIT_API virtual uint32_t getStackAlignment();
//! Flush an instruction cache.
//!
//! This member function is called after the code has been copied to the
@@ -138,7 +159,7 @@ struct ASMJIT_VIRTAPI HostRuntime : public Runtime {
//!
//! This function can also be overridden to improve compatibility with tools
//! such as Valgrind, however, it's not an official part of AsmJit.
ASMJIT_API virtual void flush(void* p, size_t size);
ASMJIT_API virtual void flush(void* p, size_t size) noexcept;
};
// ============================================================================
@@ -159,31 +180,31 @@ struct ASMJIT_VIRTAPI StaticRuntime : public HostRuntime {
//! Create a `StaticRuntime` instance.
//!
//! The `address` specifies a fixed target address, which will be used as a
//! base address for relocation, and `sizeLimit` specified the maximum size
//! base address for relocation, and `sizeLimit` specifies the maximum size
//! of a code that can be copied to it. If there is no limit `sizeLimit`
//! should be zero.
ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0);
ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0) noexcept;
//! Destroy the `StaticRuntime` instance.
ASMJIT_API virtual ~StaticRuntime();
ASMJIT_API virtual ~StaticRuntime() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the base address.
ASMJIT_INLINE Ptr getBaseAddress() const { return _baseAddress; }
ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; }
//! Get the maximum size of the code that can be relocated/stored in the target.
//!
//! Returns zero if unlimited.
ASMJIT_INLINE size_t getSizeLimit() const { return _sizeLimit; }
ASMJIT_INLINE size_t getSizeLimit() const noexcept { return _sizeLimit; }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler);
ASMJIT_API virtual Error release(void* p);
ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept;
ASMJIT_API virtual Error release(void* p) noexcept;
};
// ============================================================================
@@ -199,28 +220,28 @@ struct ASMJIT_VIRTAPI JitRuntime : public HostRuntime {
// --------------------------------------------------------------------------
//! Create a `JitRuntime` instance.
ASMJIT_API JitRuntime();
ASMJIT_API JitRuntime() noexcept;
//! Destroy the `JitRuntime` instance.
ASMJIT_API virtual ~JitRuntime();
ASMJIT_API virtual ~JitRuntime() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the type of allocation.
ASMJIT_INLINE uint32_t getAllocType() const { return _allocType; }
ASMJIT_INLINE uint32_t getAllocType() const noexcept { return _allocType; }
//! Set the type of allocation.
ASMJIT_INLINE void setAllocType(uint32_t allocType) { _allocType = allocType; }
ASMJIT_INLINE void setAllocType(uint32_t allocType) noexcept { _allocType = allocType; }
//! Get the virtual memory manager.
ASMJIT_INLINE VMemMgr* getMemMgr() const { return const_cast<VMemMgr*>(&_memMgr); }
ASMJIT_INLINE VMemMgr* getMemMgr() const noexcept { return const_cast<VMemMgr*>(&_memMgr); }
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error add(void** dst, Assembler* assembler);
ASMJIT_API virtual Error release(void* p);
ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept;
ASMJIT_API virtual Error release(void* p) noexcept;
// --------------------------------------------------------------------------
// [Members]

View File

@@ -43,7 +43,7 @@ namespace asmjit {
static volatile uint32_t Utils_hiResTicks;
static volatile double Utils_hiResFreq;
uint32_t Utils::getTickCount() {
uint32_t Utils::getTickCount() noexcept {
do {
uint32_t hiResOk = Utils_hiResTicks;
@@ -87,7 +87,7 @@ uint32_t Utils::getTickCount() {
#elif ASMJIT_OS_MAC
static mach_timebase_info_data_t CpuTicks_machTime;
uint32_t Utils::getTickCount() {
uint32_t Utils::getTickCount() noexcept {
// Initialize the first time CpuTicks::now() is called (See Apple's QA1398).
if (CpuTicks_machTime.denom == 0) {
if (mach_timebase_info(&CpuTicks_machTime) != KERN_SUCCESS)
@@ -106,7 +106,7 @@ uint32_t Utils::getTickCount() {
// ============================================================================
#else
uint32_t Utils::getTickCount() {
uint32_t Utils::getTickCount() noexcept {
#if defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
struct timespec ts;
@@ -122,6 +122,10 @@ uint32_t Utils::getTickCount() {
}
#endif // ASMJIT_OS
// ============================================================================
// [asmjit::Utils - Unit]
// ============================================================================
#if defined(ASMJIT_TEST)
UNIT(base_utils) {
uint32_t i;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -71,7 +71,7 @@ namespace asmjit {
struct VMemLocal {
// AsmJit allows to pass a `nullptr` handle to `VMemUtil`. This function is
// just a convenient way to convert such handle to the current process one.
ASMJIT_INLINE HANDLE getSafeProcessHandle(HANDLE hParam) const {
ASMJIT_INLINE HANDLE getSafeProcessHandle(HANDLE hParam) const noexcept {
return hParam != nullptr ? hParam : hProcess;
}
@@ -81,7 +81,7 @@ struct VMemLocal {
};
static VMemLocal vMemLocal;
static const VMemLocal& vMemGet() {
static const VMemLocal& vMemGet() noexcept {
VMemLocal& vMem = vMemLocal;
if (!vMem.hProcess) {
@@ -97,21 +97,21 @@ static const VMemLocal& vMemGet() {
return vMem;
};
size_t VMemUtil::getPageSize() {
size_t VMemUtil::getPageSize() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageSize;
}
size_t VMemUtil::getPageGranularity() {
size_t VMemUtil::getPageGranularity() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageGranularity;
}
void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) {
void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) noexcept {
return allocProcessMemory(static_cast<HANDLE>(0), length, allocated, flags);
}
void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) {
void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) noexcept {
if (length == 0)
return nullptr;
@@ -141,11 +141,11 @@ void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* alloc
return mBase;
}
Error VMemUtil::release(void* addr, size_t length) {
Error VMemUtil::release(void* addr, size_t length) noexcept {
return releaseProcessMemory(static_cast<HANDLE>(0), addr, length);
}
Error VMemUtil::releaseProcessMemory(HANDLE hProcess, void* addr, size_t /* length */) {
Error VMemUtil::releaseProcessMemory(HANDLE hProcess, void* addr, size_t /* length */) noexcept {
hProcess = vMemGet().getSafeProcessHandle(hProcess);
if (!::VirtualFreeEx(hProcess, addr, 0, MEM_RELEASE))
return kErrorInvalidState;
@@ -171,7 +171,7 @@ struct VMemLocal {
};
static VMemLocal vMemLocal;
static const VMemLocal& vMemGet() {
static const VMemLocal& vMemGet() noexcept {
VMemLocal& vMem = vMemLocal;
if (!vMem.pageSize) {
@@ -183,17 +183,17 @@ static const VMemLocal& vMemGet() {
return vMem;
};
size_t VMemUtil::getPageSize() {
size_t VMemUtil::getPageSize() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageSize;
}
size_t VMemUtil::getPageGranularity() {
size_t VMemUtil::getPageGranularity() noexcept {
const VMemLocal& vMem = vMemGet();
return vMem.pageGranularity;
}
void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) {
void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) noexcept {
const VMemLocal& vMem = vMemGet();
size_t msize = Utils::alignTo<size_t>(length, vMem.pageSize);
int protection = PROT_READ;
@@ -210,7 +210,7 @@ void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) {
return mbase;
}
Error VMemUtil::release(void* addr, size_t length) {
Error VMemUtil::release(void* addr, size_t length) noexcept {
if (::munmap(addr, length) != 0)
return kErrorInvalidState;
@@ -233,7 +233,7 @@ enum {
//! \internal
//!
//! Set `len` bits in `buf` starting at `index` bit index.
static void _SetBits(size_t* buf, size_t index, size_t len) {
static void _SetBits(size_t* buf, size_t index, size_t len) noexcept {
if (len == 0)
return;
@@ -290,14 +290,14 @@ struct VMemMgr::RbNode {
//! \internal
//!
//! Get whether the node is red (nullptr or node with red flag).
static ASMJIT_INLINE bool rbIsRed(RbNode* node) {
static ASMJIT_INLINE bool rbIsRed(RbNode* node) noexcept {
return node != nullptr && node->red;
}
//! \internal
//!
//! Check whether the RB tree is valid.
static int rbAssert(RbNode* root) {
static int rbAssert(RbNode* root) noexcept {
if (root == nullptr)
return 1;
@@ -327,7 +327,7 @@ static int rbAssert(RbNode* root) {
//! \internal
//!
//! Single rotation.
static ASMJIT_INLINE RbNode* rbRotateSingle(RbNode* root, int dir) {
static ASMJIT_INLINE RbNode* rbRotateSingle(RbNode* root, int dir) noexcept {
RbNode* save = root->node[!dir];
root->node[!dir] = save->node[dir];
@@ -342,7 +342,7 @@ static ASMJIT_INLINE RbNode* rbRotateSingle(RbNode* root, int dir) {
//! \internal
//!
//! Double rotation.
static ASMJIT_INLINE RbNode* rbRotateDouble(RbNode* root, int dir) {
static ASMJIT_INLINE RbNode* rbRotateDouble(RbNode* root, int dir) noexcept {
root->node[!dir] = rbRotateSingle(root->node[!dir], !dir);
return rbRotateSingle(root, dir);
}
@@ -357,11 +357,11 @@ struct VMemMgr::MemNode : public RbNode {
// --------------------------------------------------------------------------
// Get available space.
ASMJIT_INLINE size_t getAvailable() const {
ASMJIT_INLINE size_t getAvailable() const noexcept {
return size - used;
}
ASMJIT_INLINE void fillData(MemNode* other) {
ASMJIT_INLINE void fillData(MemNode* other) noexcept {
mem = other->mem;
size = other->size;
@@ -404,7 +404,7 @@ struct VMemMgr::PermanentNode {
// --------------------------------------------------------------------------
//! Get available space.
ASMJIT_INLINE size_t getAvailable() const {
ASMJIT_INLINE size_t getAvailable() const noexcept {
return size - used;
}
@@ -425,7 +425,7 @@ struct VMemMgr::PermanentNode {
//! \internal
//!
//! Helper to avoid `#ifdef`s in the code.
ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSize) {
ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSize) noexcept {
uint32_t flags = kVMemFlagWritable | kVMemFlagExecutable;
#if !ASMJIT_OS_WINDOWS
return static_cast<uint8_t*>(VMemUtil::alloc(size, vSize, flags));
@@ -437,7 +437,7 @@ ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSiz
//! \internal
//!
//! Helper to avoid `#ifdef`s in the code.
ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) {
ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) noexcept {
#if !ASMJIT_OS_WINDOWS
return VMemUtil::release(p, vSize);
#else
@@ -448,7 +448,7 @@ ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) {
//! \internal
//!
//! Check whether the Red-Black tree is valid.
static bool vMemMgrCheckTree(VMemMgr* self) {
static bool vMemMgrCheckTree(VMemMgr* self) noexcept {
return rbAssert(self->_root) > 0;
}
@@ -457,7 +457,7 @@ static bool vMemMgrCheckTree(VMemMgr* self) {
//! Alloc virtual memory including a heap memory needed for `MemNode` data.
//!
//! Returns set-up `MemNode*` or nullptr if allocation failed.
static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) {
static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) noexcept {
size_t vSize;
uint8_t* vmem = vMemMgrAllocVMem(self, size, &vSize);
@@ -502,7 +502,7 @@ static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) {
return node;
}
static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) {
static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) noexcept {
if (self->_root == nullptr) {
// Empty tree case.
self->_root = node;
@@ -586,7 +586,7 @@ static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) {
//!
//! Returns node that should be freed, but it doesn't have to be necessarily
//! the `node` passed.
static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) {
static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) noexcept {
// False tree root.
RbNode head = { { nullptr, nullptr }, 0, 0 };
@@ -686,7 +686,7 @@ static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) {
return static_cast<MemNode*>(q);
}
static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) {
static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) noexcept {
MemNode* node = self->_root;
while (node != nullptr) {
uint8_t* nodeMem = node->mem;
@@ -710,7 +710,7 @@ static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) {
return node;
}
static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) {
static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) noexcept {
static const size_t permanentAlignment = 32;
static const size_t permanentNodeSize = 32768;
@@ -760,7 +760,7 @@ static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) {
return static_cast<void*>(result);
}
static void* vMemMgrAllocFreeable(VMemMgr* self, size_t vSize) {
static void* vMemMgrAllocFreeable(VMemMgr* self, size_t vSize) noexcept {
// Current index.
size_t i;
@@ -892,7 +892,7 @@ _Found:
//! Reset the whole `VMemMgr` instance, freeing all heap memory allocated an
//! virtual memory allocated unless `keepVirtualMemory` is true (and this is
//! only used when writing data to a remote process).
static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) {
static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) noexcept {
MemNode* node = self->_first;
while (node != nullptr) {
@@ -921,13 +921,12 @@ static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) {
// ============================================================================
#if !ASMJIT_OS_WINDOWS
VMemMgr::VMemMgr()
VMemMgr::VMemMgr() noexcept
#else
VMemMgr::VMemMgr(HANDLE hProcess) :
VMemMgr::VMemMgr(HANDLE hProcess) noexcept :
_hProcess(vMemGet().getSafeProcessHandle(hProcess))
#endif // ASMJIT_OS_WINDOWS
{
_blockSize = VMemUtil::getPageGranularity();
_blockDensity = 64;
@@ -943,7 +942,7 @@ VMemMgr::VMemMgr(HANDLE hProcess) :
_keepVirtualMemory = false;
}
VMemMgr::~VMemMgr() {
VMemMgr::~VMemMgr() noexcept {
// Freeable memory cleanup - Also frees the virtual memory if configured to.
vMemMgrReset(this, _keepVirtualMemory);
@@ -960,7 +959,7 @@ VMemMgr::~VMemMgr() {
// [asmjit::VMemMgr - Reset]
// ============================================================================
void VMemMgr::reset() {
void VMemMgr::reset() noexcept {
vMemMgrReset(this, false);
}
@@ -968,14 +967,14 @@ void VMemMgr::reset() {
// [asmjit::VMemMgr - Alloc / Release]
// ============================================================================
void* VMemMgr::alloc(size_t size, uint32_t type) {
void* VMemMgr::alloc(size_t size, uint32_t type) noexcept {
if (type == kVMemAllocPermanent)
return vMemMgrAllocPermanent(this, size);
else
return vMemMgrAllocFreeable(this, size);
}
Error VMemMgr::release(void* p) {
Error VMemMgr::release(void* p) noexcept {
if (p == nullptr)
return kErrorOk;
@@ -1062,7 +1061,7 @@ Error VMemMgr::release(void* p) {
return kErrorOk;
}
Error VMemMgr::shrink(void* p, size_t used) {
Error VMemMgr::shrink(void* p, size_t used) noexcept {
if (p == nullptr)
return kErrorOk;
@@ -1149,7 +1148,7 @@ _EnterFreeLoop:
// ============================================================================
#if defined(ASMJIT_TEST)
static void VMemTest_fill(void* a, void* b, int i) {
static void VMemTest_fill(void* a, void* b, int i) noexcept {
int pattern = rand() % 256;
*(int *)a = i;
*(int *)b = i;
@@ -1157,7 +1156,7 @@ static void VMemTest_fill(void* a, void* b, int i) {
::memset((char*)b + sizeof(int), pattern, i - sizeof(int));
}
static void VMemTest_verify(void* a, void* b) {
static void VMemTest_verify(void* a, void* b) noexcept {
int ai = *(int*)a;
int bi = *(int*)b;
@@ -1168,12 +1167,12 @@ static void VMemTest_verify(void* a, void* b) {
"Pattern (%p) doesn't match", a);
}
static void VMemTest_stats(VMemMgr& memmgr) {
static void VMemTest_stats(VMemMgr& memmgr) noexcept {
INFO("Used : %u", static_cast<unsigned int>(memmgr.getUsedBytes()));
INFO("Allocated: %u", static_cast<unsigned int>(memmgr.getAllocatedBytes()));
}
static void VMemTest_shuffle(void** a, void** b, size_t count) {
static void VMemTest_shuffle(void** a, void** b, size_t count) noexcept {
for (size_t i = 0; i < count; ++i) {
size_t si = (size_t)rand() % count;

View File

@@ -60,26 +60,26 @@ ASMJIT_ENUM(VMemFlags) {
//! overview on how to use a platform specific APIs.
struct VMemUtil {
//! Get a size/alignment of a single virtual memory page.
static ASMJIT_API size_t getPageSize();
static ASMJIT_API size_t getPageSize() noexcept;
//! Get a recommended granularity for a single `alloc` call.
static ASMJIT_API size_t getPageGranularity();
static ASMJIT_API size_t getPageGranularity() noexcept;
//! Allocate virtual memory.
//!
//! Pages are readable/writeable, but they are not guaranteed to be
//! executable unless 'canExecute' is true. Returns the address of
//! allocated memory, or `nullptr` on failure.
static ASMJIT_API void* alloc(size_t length, size_t* allocated, uint32_t flags);
static ASMJIT_API void* alloc(size_t length, size_t* allocated, uint32_t flags) noexcept;
//! Free memory allocated by `alloc()`.
static ASMJIT_API Error release(void* addr, size_t length);
static ASMJIT_API Error release(void* addr, size_t length) noexcept;
#if ASMJIT_OS_WINDOWS
//! Allocate virtual memory of `hProcess` (Windows only).
static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags);
static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) noexcept;
//! Release virtual memory of `hProcess` (Windows only).
static ASMJIT_API Error releaseProcessMemory(HANDLE hProcess, void* addr, size_t length);
static ASMJIT_API Error releaseProcessMemory(HANDLE hProcess, void* addr, size_t length) noexcept;
#endif // ASMJIT_OS_WINDOWS
};
@@ -96,25 +96,25 @@ struct VMemMgr {
#if !ASMJIT_OS_WINDOWS
//! Create a `VMemMgr` instance.
ASMJIT_API VMemMgr();
ASMJIT_API VMemMgr() noexcept;
#else
//! Create a `VMemMgr` instance.
//!
//! \note When running on Windows it's possible to specify a `hProcess` to
//! be used for memory allocation. This allows to allocate memory of remote
//! process.
ASMJIT_API VMemMgr(HANDLE hProcess = static_cast<HANDLE>(0));
ASMJIT_API VMemMgr(HANDLE hProcess = static_cast<HANDLE>(0)) noexcept;
#endif // ASMJIT_OS_WINDOWS
//! Destroy the `VMemMgr` instance and free all blocks.
ASMJIT_API ~VMemMgr();
ASMJIT_API ~VMemMgr() noexcept;
// --------------------------------------------------------------------------
// [Reset]
// --------------------------------------------------------------------------
//! Free all allocated memory.
ASMJIT_API void reset();
ASMJIT_API void reset() noexcept;
// --------------------------------------------------------------------------
// [Accessors]
@@ -122,25 +122,25 @@ struct VMemMgr {
#if ASMJIT_OS_WINDOWS
//! Get the handle of the process memory manager is bound to.
ASMJIT_INLINE HANDLE getProcessHandle() const {
ASMJIT_INLINE HANDLE getProcessHandle() const noexcept {
return _hProcess;
}
#endif // ASMJIT_OS_WINDOWS
//! Get how many bytes are currently allocated.
ASMJIT_INLINE size_t getAllocatedBytes() const {
ASMJIT_INLINE size_t getAllocatedBytes() const noexcept {
return _allocatedBytes;
}
//! Get how many bytes are currently used.
ASMJIT_INLINE size_t getUsedBytes() const {
ASMJIT_INLINE size_t getUsedBytes() const noexcept {
return _usedBytes;
}
//! Get whether to keep allocated memory after the `VMemMgr` is destroyed.
//!
//! \sa \ref setKeepVirtualMemory.
ASMJIT_INLINE bool getKeepVirtualMemory() const {
ASMJIT_INLINE bool getKeepVirtualMemory() const noexcept {
return _keepVirtualMemory;
}
@@ -156,7 +156,7 @@ struct VMemMgr {
//! \note Memory allocated with kVMemAllocPermanent is always kept.
//!
//! \sa \ref getKeepVirtualMemory.
ASMJIT_INLINE void setKeepVirtualMemory(bool keepVirtualMemory) {
ASMJIT_INLINE void setKeepVirtualMemory(bool keepVirtualMemory) noexcept {
_keepVirtualMemory = keepVirtualMemory;
}
@@ -169,13 +169,13 @@ struct VMemMgr {
//! Note that if you are implementing your own virtual memory manager then you
//! can quitly ignore type of allocation. This is mainly for AsmJit to memory
//! manager that allocated memory will be never freed.
ASMJIT_API void* alloc(size_t size, uint32_t type = kVMemAllocFreeable);
ASMJIT_API void* alloc(size_t size, uint32_t type = kVMemAllocFreeable) noexcept;
//! Free previously allocated memory at a given `address`.
ASMJIT_API Error release(void* p);
ASMJIT_API Error release(void* p) noexcept;
//! Free extra memory allocated with `p`.
ASMJIT_API Error shrink(void* p, size_t used);
ASMJIT_API Error shrink(void* p, size_t used) noexcept;
// --------------------------------------------------------------------------
// [Members]

View File

@@ -28,12 +28,12 @@ static const Zone::Block Zone_zeroBlock = {
// [asmjit::Zone - Construction / Destruction]
// ============================================================================
Zone::Zone(size_t blockSize) {
Zone::Zone(size_t blockSize) noexcept {
_block = const_cast<Zone::Block*>(&Zone_zeroBlock);
_blockSize = blockSize;
}
Zone::~Zone() {
Zone::~Zone() noexcept {
reset(true);
}
@@ -41,7 +41,7 @@ Zone::~Zone() {
// [asmjit::Zone - Reset]
// ============================================================================
void Zone::reset(bool releaseMemory) {
void Zone::reset(bool releaseMemory) noexcept {
Block* cur = _block;
// Can't be altered.
@@ -80,7 +80,7 @@ void Zone::reset(bool releaseMemory) {
// [asmjit::Zone - Alloc]
// ============================================================================
void* Zone::_alloc(size_t size) {
void* Zone::_alloc(size_t size) noexcept {
Block* curBlock = _block;
size_t blockSize = Utils::iMax<size_t>(_blockSize, size);
@@ -129,14 +129,14 @@ void* Zone::_alloc(size_t size) {
return static_cast<void*>(newBlock->data);
}
void* Zone::allocZeroed(size_t size) {
void* Zone::allocZeroed(size_t size) noexcept {
void* p = alloc(size);
if (p != nullptr)
::memset(p, 0, size);
return p;
}
void* Zone::dup(const void* data, size_t size) {
void* Zone::dup(const void* data, size_t size) noexcept {
if (data == nullptr)
return nullptr;
@@ -151,7 +151,7 @@ void* Zone::dup(const void* data, size_t size) {
return m;
}
char* Zone::sdup(const char* str) {
char* Zone::sdup(const char* str) noexcept {
if (str == nullptr)
return nullptr;
@@ -172,7 +172,7 @@ char* Zone::sdup(const char* str) {
return m;
}
char* Zone::sformat(const char* fmt, ...) {
char* Zone::sformat(const char* fmt, ...) noexcept {
if (fmt == nullptr)
return nullptr;

View File

@@ -48,12 +48,12 @@ struct Zone {
// ------------------------------------------------------------------------
//! Get the size of the block.
ASMJIT_INLINE size_t getBlockSize() const {
ASMJIT_INLINE size_t getBlockSize() const noexcept {
return (size_t)(end - data);
}
//! Get count of remaining bytes in the block.
ASMJIT_INLINE size_t getRemainingSize() const {
ASMJIT_INLINE size_t getRemainingSize() const noexcept {
return (size_t)(end - pos);
}
@@ -98,13 +98,13 @@ struct Zone {
//! It's not required, but it's good practice to set `blockSize` to a
//! reasonable value that depends on the usage of `Zone`. Greater block sizes
//! are generally safer and performs better than unreasonably low values.
ASMJIT_API Zone(size_t blockSize);
ASMJIT_API Zone(size_t blockSize) noexcept;
//! Destroy the `Zone` instance.
//!
//! This will destroy the `Zone` instance and release all blocks of memory
//! allocated by it. It performs implicit `reset(true)`.
ASMJIT_API ~Zone();
ASMJIT_API ~Zone() noexcept;
// --------------------------------------------------------------------------
// [Reset]
@@ -113,14 +113,14 @@ struct Zone {
//! Reset the `Zone` invalidating all blocks allocated.
//!
//! If `releaseMemory` is true all buffers will be released to the system.
ASMJIT_API void reset(bool releaseMemory = false);
ASMJIT_API void reset(bool releaseMemory = false) noexcept;
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get the default block size.
ASMJIT_INLINE size_t getBlockSize() const {
ASMJIT_INLINE size_t getBlockSize() const noexcept {
return _blockSize;
}
@@ -160,7 +160,7 @@ struct Zone {
//! // Reset of destroy `Zone`.
//! zone.reset();
//! ~~~
ASMJIT_INLINE void* alloc(size_t size) {
ASMJIT_INLINE void* alloc(size_t size) noexcept {
Block* cur = _block;
uint8_t* ptr = cur->pos;
@@ -178,31 +178,31 @@ struct Zone {
//! Allocate `size` bytes of zeroed memory.
//!
//! See \ref alloc() for more details.
ASMJIT_API void* allocZeroed(size_t size);
ASMJIT_API void* allocZeroed(size_t size) noexcept;
//! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocT(size_t size = sizeof(T)) {
ASMJIT_INLINE T* allocT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(alloc(size));
}
//! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T)) {
ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T)) noexcept {
return static_cast<T*>(allocZeroed(size));
}
//! \internal
ASMJIT_API void* _alloc(size_t size);
ASMJIT_API void* _alloc(size_t size) noexcept;
//! Helper to duplicate data.
ASMJIT_API void* dup(const void* data, size_t size);
ASMJIT_API void* dup(const void* data, size_t size) noexcept;
//! Helper to duplicate string.
ASMJIT_API char* sdup(const char* str);
ASMJIT_API char* sdup(const char* str) noexcept;
//! Helper to duplicate formatted string, maximum length is 256 bytes.
ASMJIT_API char* sformat(const char* str, ...);
ASMJIT_API char* sformat(const char* str, ...) noexcept;
// --------------------------------------------------------------------------
// [Members]

View File

@@ -70,13 +70,13 @@
// AsmJit features are enabled by default.
// #define ASMJIT_DISABLE_COMPILER // Disable Compiler (completely).
// #define ASMJIT_DISABLE_LOGGER // Disable Logger (completely).
// #define ASMJIT_DISABLE_NAMES // Disable everything that uses strings
// // (instruction names, error names, ...).
// #define ASMJIT_DISABLE_TEXT // Disable everything that contains text
// // representation (instructions, errors, ...).
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_DISABLE_NAMES) && !defined(ASMJIT_DISABLE_LOGGER)
# error "[asmjit] ASMJIT_DISABLE_NAMES requires ASMJIT_DISABLE_LOGGER to be defined."
#endif // ASMJIT_DISABLE_NAMES && !ASMJIT_DISABLE_LOGGER
#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGER)
# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGER to be defined."
#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGER
// Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside.
#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE) && !defined(NDEBUG)
@@ -226,7 +226,7 @@
// ============================================================================
// [@ARCH{@]
// \def ASMJIT_ARCH_ARM
// \def ASMJIT_ARCH_ARM32
// True if the target architecture is a 32-bit ARM.
//
// \def ASMJIT_ARCH_ARM64
@@ -268,17 +268,17 @@
# define ASMJIT_ARCH_ARM64 0
#endif
#if (defined(_M_ARM ) || defined(__arm__ ) || defined(__arm) || \
defined(_M_ARMT ) || defined(__thumb__))
# define ASMJIT_ARCH_ARM (!ASMJIT_ARCH_ARM64)
#if (defined(_M_ARM ) || defined(__arm ) || defined(__thumb__ ) || \
defined(_M_ARMT ) || defined(__arm__ ) || defined(__thumb2__))
# define ASMJIT_ARCH_ARM32 (!ASMJIT_ARCH_ARM64)
#else
# define ASMJIT_ARCH_ARM 0
# define ASMJIT_ARCH_ARM32 0
#endif
#define ASMJIT_ARCH_LE ( \
ASMJIT_ARCH_X86 || \
ASMJIT_ARCH_X64 || \
ASMJIT_ARCH_ARM || \
ASMJIT_ARCH_ARM32 || \
ASMJIT_ARCH_ARM64 )
#define ASMJIT_ARCH_BE (!(ASMJIT_ARCH_LE))
#define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64)
@@ -690,7 +690,7 @@
// [@CC_NOEXCEPT{@]
// \def ASMJIT_NOEXCEPT
// The decorated function never throws an exception (noexcept).
#if ASMJIT_HAS_NOEXCEPT
#if ASMJIT_CC_HAS_NOEXCEPT
# define ASMJIT_NOEXCEPT noexcept
#else
# define ASMJIT_NOEXCEPT
@@ -860,10 +860,10 @@ typedef unsigned __int64 uint64_t;
# endif
#endif // !ASMJIT_ALLOC && !ASMJIT_REALLOC && !ASMJIT_FREE
#define ASMJIT_NO_COPY(Self) \
#define ASMJIT_NO_COPY(...) \
private: \
ASMJIT_INLINE Self(const Self& other); \
ASMJIT_INLINE Self& operator=(const Self& other); \
ASMJIT_INLINE __VA_ARGS__(const __VA_ARGS__& other); \
ASMJIT_INLINE __VA_ARGS__& operator=(const __VA_ARGS__& other); \
public:
// ============================================================================

View File

@@ -35,9 +35,6 @@ typedef X86YmmReg YmmReg;
typedef X86SegReg SegReg;
typedef X86Mem Mem;
// Define host utilities.
typedef X86CpuInfo HostCpuInfo;
// Define host compiler and related.
#if !defined(ASMJIT_DISABLE_COMPILER)
typedef X86Compiler HostCompiler;

View File

@@ -14,7 +14,6 @@
#include "./x86/x86assembler.h"
#include "./x86/x86compiler.h"
#include "./x86/x86compilerfunc.h"
#include "./x86/x86cpuinfo.h"
#include "./x86/x86inst.h"
#include "./x86/x86operand.h"

View File

@@ -13,12 +13,12 @@
// [Dependencies - AsmJit]
#include "../base/containers.h"
#include "../base/cpuinfo.h"
#include "../base/logger.h"
#include "../base/runtime.h"
#include "../base/utils.h"
#include "../base/vmem.h"
#include "../x86/x86assembler.h"
#include "../x86/x86cpuinfo.h"
// [Api-Begin]
#include "../apibegin.h"
@@ -99,17 +99,10 @@ struct X86OpCodeMM {
//! \internal
//!
//! Mandatory prefixes encoded in 'asmjit' opcode [66, F3, F2] and asmjit
//! Mandatory prefixes encoded in 'asmjit' opcode [66, F3, F2] and AsmJit
//! extensions
static const uint8_t x86OpCodePP[8] = {
0x00,
0x66,
0xF3,
0xF2,
0x00,
0x00,
0x00,
0x9B
0x00, 0x66, 0xF3, 0xF2, 0x00, 0x00, 0x00, 0x9B
};
//! \internal
@@ -206,70 +199,65 @@ static ASMJIT_INLINE bool x86IsYmm(const X86Reg* reg) { return reg->isYmm(); }
// [Macros]
// ============================================================================
#define ENC_OPS(_Op0_, _Op1_, _Op2_) \
((kOperandType##_Op0_) + ((kOperandType##_Op1_) << 3) + ((kOperandType##_Op2_) << 6))
#define ENC_OPS(op0, op1, op2) \
((Operand::kType##op0) + ((Operand::kType##op1) << 3) + ((Operand::kType##op2) << 6))
#define ADD_66H_P(_Exp_) \
#define ADD_66H_P(exp) \
do { \
opCode |= (static_cast<uint32_t>(_Exp_) << kX86InstOpCode_PP_Shift); \
opCode |= (static_cast<uint32_t>(exp) << kX86InstOpCode_PP_Shift); \
} while (0)
#define ADD_66H_P_BY_SIZE(_Size_) \
#define ADD_66H_P_BY_SIZE(sz) \
do { \
opCode |= (static_cast<uint32_t>(_Size_) & 0x02) << (kX86InstOpCode_PP_Shift - 1); \
opCode |= (static_cast<uint32_t>(sz) & 0x02) << (kX86InstOpCode_PP_Shift - 1); \
} while (0)
#define ADD_REX_W(_Exp_) \
#define ADD_REX_W(exp) \
do { \
if (Arch == kArchX64) \
opCode |= static_cast<uint32_t>(_Exp_) << kX86InstOpCode_W_Shift; \
opCode |= static_cast<uint32_t>(exp) << kX86InstOpCode_W_Shift; \
} while (0)
#define ADD_REX_W_BY_SIZE(_Size_) \
#define ADD_REX_W_BY_SIZE(sz) \
do { \
if (Arch == kArchX64 && (_Size_) == 8) \
if (Arch == kArchX64 && (sz) == 8) \
opCode |= kX86InstOpCode_W; \
} while (0)
#define ADD_VEX_W(_Exp_) \
#define ADD_VEX_W(exp) \
do { \
opCode |= static_cast<uint32_t>(_Exp_) << kX86InstOpCode_W_Shift; \
opCode |= static_cast<uint32_t>(exp) << kX86InstOpCode_W_Shift; \
} while (0)
#define ADD_VEX_L(_Exp_) \
#define ADD_VEX_L(exp) \
do { \
opCode |= static_cast<uint32_t>(_Exp_) << kX86InstOpCode_L_Shift; \
opCode |= static_cast<uint32_t>(exp) << kX86InstOpCode_L_Shift; \
} while (0)
#define EMIT_BYTE(_Val_) \
do { \
cursor[0] = static_cast<uint8_t>(_Val_); \
cursor[0] = static_cast<uint8_t>((_Val_) & 0xFF); \
cursor += 1; \
} while (0)
#define EMIT_WORD(_Val_) \
do { \
reinterpret_cast<uint16_t*>(cursor)[0] = static_cast<uint16_t>(_Val_); \
Utils::writeU16uLE(cursor, static_cast<uint32_t>(_Val_)); \
cursor += 2; \
} while (0)
#define EMIT_DWORD(_Val_) \
do { \
reinterpret_cast<uint32_t*>(cursor)[0] = static_cast<uint32_t>(_Val_); \
Utils::writeU32uLE(cursor, static_cast<uint32_t>(_Val_)); \
cursor += 4; \
} while (0)
#define EMIT_QWORD(_Val_) \
do { \
reinterpret_cast<uint64_t*>(cursor)[0] = static_cast<uint64_t>(_Val_); \
Utils::writeU64uLE(cursor, static_cast<uint64_t>(_Val_)); \
cursor += 8; \
} while (0)
#define EMIT_OP(_Val_) \
do { \
EMIT_BYTE((_Val_) & 0xFF); \
} while (0)
#define EMIT_PP(_Val_) \
do { \
uint32_t ppIndex = ((_Val_) >> kX86InstOpCode_PP_Shift) & (kX86InstOpCode_PP_Mask >> kX86InstOpCode_PP_Shift); \
@@ -309,6 +297,7 @@ X86Assembler::X86Assembler(Runtime* runtime, uint32_t arch)
zbp(NoInit),
zsi(NoInit),
zdi(NoInit) {
ASMJIT_ASSERT(arch == kArchX86 || arch == kArchX64);
_setArch(arch);
}
@@ -367,13 +356,12 @@ Error X86Assembler::embedLabel(const Label& op) {
ASMJIT_PROPAGATE_ERROR(_grow(regSize));
uint8_t* cursor = getCursor();
LabelData* label = getLabelData(op.getId());
RelocData rd;
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logFormat(kLoggerStyleData, regSize == 4 ? ".dd L%u\n" : ".dq L%u\n", op.getId());
_logger->logFormat(Logger::kStyleData, regSize == 4 ? ".dd L%u\n" : ".dq L%u\n", op.getId());
#endif // !ASMJIT_DISABLE_LOGGER
rd.type = kRelocRelToAbs;
@@ -392,19 +380,18 @@ Error X86Assembler::embedLabel(const Label& op) {
link->prev = (LabelLink*)label->links;
link->offset = getOffset();
link->displacement = 0;
link->relocId = _relocList.getLength();
link->relocId = _relocations.getLength();
label->links = link;
}
if (_relocList.append(rd) != kErrorOk)
if (_relocations.append(rd) != kErrorOk)
return setLastError(kErrorNoHeapMemory);
// Emit dummy intptr_t (4 or 8 bytes; depends on the address size).
if (regSize == 4)
EMIT_DWORD(0);
else
EMIT_QWORD(0);
if (regSize == 8)
EMIT_DWORD(0);
setCursor(cursor);
return kErrorOk;
@@ -414,10 +401,10 @@ Error X86Assembler::embedLabel(const Label& op) {
// [asmjit::X86Assembler - Align]
// ============================================================================
Error X86Assembler::align(uint32_t alignMode, uint32_t offset) {
Error X86Assembler::align(uint32_t alignMode, uint32_t offset) noexcept {
#if !defined(ASMJIT_DISABLE_LOGGER)
if (_logger)
_logger->logFormat(kLoggerStyleDirective,
_logger->logFormat(Logger::kStyleDirective,
"%s.align %u\n", _logger->getIndentation(), static_cast<unsigned int>(offset));
#endif // !ASMJIT_DISABLE_LOGGER
@@ -436,7 +423,7 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) {
switch (alignMode) {
case kAlignCode: {
if (hasFeature(kAssemblerFeatureOptimizedAlign)) {
if (hasAsmOption(kOptionOptimizedAlign)) {
// Intel 64 and IA-32 Architectures Software Developer's Manual - Volume 2B (NOP).
enum { kMaxNopSize = 9 };
@@ -454,7 +441,7 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) {
do {
uint32_t n = Utils::iMin<uint32_t>(i, kMaxNopSize);
const uint8_t* p = nopData[(n - 1)];
const uint8_t* p = nopData[n - 1];
i -= n;
do {
@@ -491,7 +478,7 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) {
// [asmjit::X86Assembler - Reloc]
// ============================================================================
size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const {
size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const noexcept {
uint32_t arch = getArch();
uint8_t* dst = static_cast<uint8_t*>(_dst);
@@ -510,8 +497,8 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const {
uint8_t* tramp = dst + minCodeSize;
// Relocate all recorded locations.
size_t relocCount = _relocList.getLength();
const RelocData* rdList = _relocList.getData();
size_t relocCount = _relocations.getLength();
const RelocData* rdList = _relocations.getData();
for (size_t i = 0; i < relocCount; i++) {
const RelocData& rd = rdList[i];
@@ -551,12 +538,12 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const {
}
switch (rd.size) {
case 8:
*reinterpret_cast<int64_t*>(dst + offset) = static_cast<int64_t>(ptr);
case 4:
Utils::writeU32u(dst + offset, static_cast<int32_t>(static_cast<SignedPtr>(ptr)));
break;
case 4:
*reinterpret_cast<int32_t*>(dst + offset) = static_cast<int32_t>(static_cast<SignedPtr>(ptr));
case 8:
Utils::writeI64u(dst + offset, static_cast<int64_t>(ptr));
break;
default:
@@ -582,14 +569,14 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const {
dst[offset - 1] = byte1;
// Absolute address.
((uint64_t*)tramp)[0] = static_cast<uint64_t>(rd.data);
Utils::writeU64u(tramp, static_cast<uint64_t>(rd.data));
// Advance trampoline pointer.
tramp += 8;
#if !defined(ASMJIT_DISABLE_LOGGER)
if (logger)
logger->logFormat(kLoggerStyleComment, "; Trampoline %llX\n", rd.data);
logger->logFormat(Logger::kStyleComment, "; Trampoline %llX\n", rd.data);
#endif // !ASMJIT_DISABLE_LOGGER
}
}
@@ -606,7 +593,7 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const {
#if !defined(ASMJIT_DISABLE_LOGGER)
// Logging helpers.
static const char* AssemblerX86_getAddressSizeString(uint32_t size) {
static const char* AssemblerX86_getAddressSizeString(uint32_t size) noexcept {
switch (size) {
case 1 : return "byte ptr ";
case 2 : return "word ptr ";
@@ -820,7 +807,7 @@ static void X86Assembler_dumpOperand(StringBuilder& sb, uint32_t arch, const Ope
}
sb._appendChar(prefix);
if ((loggerOptions & (1 << kLoggerOptionHexDisplacement)) != 0 && dispOffset > 9) {
if ((loggerOptions & Logger::kOptionHexDisplacement) != 0 && dispOffset > 9) {
sb._appendString("0x", 2);
base = 16;
}
@@ -833,7 +820,7 @@ static void X86Assembler_dumpOperand(StringBuilder& sb, uint32_t arch, const Ope
const Imm* i = static_cast<const Imm*>(op);
int64_t val = i->getInt64();
if ((loggerOptions & (1 << kLoggerOptionHexImmediate)) && static_cast<uint64_t>(val) > 9)
if ((loggerOptions & Logger::kOptionHexImmediate) != 0 && static_cast<uint64_t>(val) > 9)
sb.appendUInt(static_cast<uint64_t>(val), 16);
else
sb.appendInt(val, 10);
@@ -904,13 +891,13 @@ static bool X86Assembler_dumpInstruction(StringBuilder& sb,
#define HI_REG(_Index_) ((_kX86RegTypePatchedGpbHi << 8) | _Index_)
//! \internal
static const Operand::VRegOp x86PatchedHiRegs[4] = {
// --------------+---+--------------+--------------+------------+
// ----------------+---+--------------+--------------+------------+
// Operand | S | Reg. Code | OperandId | Unused |
// --------------+---+--------------+--------------+------------+
{ kOperandTypeReg, 1 , { HI_REG(4) }, kInvalidValue, {{ 0, 0 }} },
{ kOperandTypeReg, 1 , { HI_REG(5) }, kInvalidValue, {{ 0, 0 }} },
{ kOperandTypeReg, 1 , { HI_REG(6) }, kInvalidValue, {{ 0, 0 }} },
{ kOperandTypeReg, 1 , { HI_REG(7) }, kInvalidValue, {{ 0, 0 }} }
// ----------------+---+--------------+--------------+------------+
{ Operand::kTypeReg, 1 , { HI_REG(4) }, kInvalidValue, {{ 0, 0 }} },
{ Operand::kTypeReg, 1 , { HI_REG(5) }, kInvalidValue, {{ 0, 0 }} },
{ Operand::kTypeReg, 1 , { HI_REG(6) }, kInvalidValue, {{ 0, 0 }} },
{ Operand::kTypeReg, 1 , { HI_REG(7) }, kInvalidValue, {{ 0, 0 }} }
};
#undef HI_REG
@@ -997,8 +984,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
}
else {
// `W` field.
ASMJIT_ASSERT(static_cast<uint32_t>(kX86InstOptionRex) ==
static_cast<uint32_t>(kX86ByteRex));
ASMJIT_ASSERT(static_cast<uint32_t>(kX86InstOptionRex) == static_cast<uint32_t>(kX86ByteRex));
// Check if one or more register operand is one of BPL, SPL, SIL, DIL and
// force a REX prefix to be emitted in such case.
@@ -1282,12 +1268,12 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
intptr_t offs = label->offset - (intptr_t)(cursor - self->_buffer);
ASMJIT_ASSERT(offs <= 0);
EMIT_OP(opCode);
EMIT_BYTE(opCode);
EMIT_DWORD(static_cast<int32_t>(offs - kRel32Size));
}
else {
// Non-bound label.
EMIT_OP(opCode);
EMIT_BYTE(opCode);
dispOffset = -4;
dispSize = 4;
relocId = -1;
@@ -1435,10 +1421,10 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
uint8_t imm8 = static_cast<uint8_t>(imVal & 0xFF);
if (imm8 == 0x03) {
EMIT_OP(opCode);
EMIT_BYTE(opCode);
}
else {
EMIT_OP(opCode + 1);
EMIT_BYTE(opCode + 1);
EMIT_BYTE(imm8);
}
goto _EmitDone;
@@ -1449,7 +1435,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
if (encoded == ENC_OPS(Label, None, None)) {
label = self->getLabelData(static_cast<const Label*>(o0)->getId());
if (self->hasFeature(kAssemblerFeaturePredictedJumps)) {
if (self->hasAsmOption(Assembler::kOptionPredictedJumps)) {
if (options & kInstOptionTaken)
EMIT_BYTE(0x3E);
if (options & kInstOptionNotTaken)
@@ -1465,7 +1451,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
ASMJIT_ASSERT(offs <= 0);
if ((options & kInstOptionLongForm) == 0 && Utils::isInt8(offs - kRel8Size)) {
EMIT_OP(opCode);
EMIT_BYTE(opCode);
EMIT_BYTE(offs - kRel8Size);
options |= kInstOptionShortForm;
@@ -1473,7 +1459,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
}
else {
EMIT_BYTE(0x0F);
EMIT_OP(opCode + 0x10);
EMIT_BYTE(opCode + 0x10);
EMIT_DWORD(static_cast<int32_t>(offs - kRel32Size));
options &= ~kInstOptionShortForm;
@@ -1483,7 +1469,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
else {
// Non-bound label.
if (options & kInstOptionShortForm) {
EMIT_OP(opCode);
EMIT_BYTE(opCode);
dispOffset = -1;
dispSize = 1;
relocId = -1;
@@ -1491,7 +1477,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
}
else {
EMIT_BYTE(0x0F);
EMIT_OP(opCode + 0x10);
EMIT_BYTE(opCode + 0x10);
dispOffset = -4;
dispSize = 4;
relocId = -1;
@@ -1868,9 +1854,9 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co
else {
_GroupPop_Gp:
// We allow 2 byte, 4 byte, and 8 byte register sizes, althought PUSH
// and POP only allows 2 bytes or register width. On 64-bit we simply
// and POP only allow 2 bytes or native size. On 64-bit we simply
// PUSH/POP 64-bit register even if 32-bit register was given.
if (o0->getSize() < 1)
if (o0->getSize() < 2)
goto _IllegalInst;
opCode = extendedInfo.getSecondaryOpCode();
@@ -2359,7 +2345,7 @@ _EmitFpArith_Mem:
}
EMIT_BYTE(0x0F);
EMIT_OP(opCode);
EMIT_BYTE(opCode);
EMIT_BYTE(0xC0 | (opReg << 3));
goto _EmitDone;
@@ -3512,7 +3498,7 @@ _EmitX86Op:
// Instruction opcodes.
EMIT_MM(opCode);
EMIT_OP(opCode);
EMIT_BYTE(opCode);
if (imLen != 0)
goto _EmitImm;
@@ -3542,7 +3528,7 @@ _EmitX86OpWithOpReg:
// Instruction opcodes.
opCode += opReg;
EMIT_MM(opCode);
EMIT_OP(opCode);
EMIT_BYTE(opCode);
if (imLen != 0)
goto _EmitImm;
@@ -3573,7 +3559,7 @@ _EmitX86R:
// Instruction opcodes.
EMIT_MM(opCode);
EMIT_OP(opCode);
EMIT_BYTE(opCode);
// ModR.
EMIT_BYTE(x86EncodeMod(3, opReg, static_cast<uint32_t>(rmReg)));
@@ -3585,7 +3571,7 @@ _EmitX86R:
_EmitX86M:
ASMJIT_ASSERT(rmMem != nullptr);
ASMJIT_ASSERT(rmMem->getOp() == kOperandTypeMem);
ASMJIT_ASSERT(rmMem->getOp() == Operand::kTypeMem);
mBase = rmMem->getBase();
mIndex = rmMem->getIndex();
@@ -3632,7 +3618,7 @@ _EmitX86M:
// Instruction opcodes.
EMIT_MM(opCode);
EMIT_OP(opCode);
EMIT_BYTE(opCode);
// ... Fall through ...
// --------------------------------------------------------------------------
@@ -3724,7 +3710,7 @@ _EmitSib:
else if (rmMem->getMemType() == kMemTypeLabel) {
// Relative->Absolute [x86 mode].
label = self->getLabelData(rmMem->_vmem.base);
relocId = self->_relocList.getLength();
relocId = self->_relocations.getLength();
RelocData rd;
rd.type = kRelocRelToAbs;
@@ -3732,12 +3718,12 @@ _EmitSib:
rd.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer));
rd.data = static_cast<SignedPtr>(dispOffset);
if (self->_relocList.append(rd) != kErrorOk)
if (self->_relocations.append(rd) != kErrorOk)
return self->setLastError(kErrorNoHeapMemory);
if (label->offset != -1) {
// Bound label.
self->_relocList[relocId].data += static_cast<SignedPtr>(label->offset);
self->_relocations[relocId].data += static_cast<SignedPtr>(label->offset);
EMIT_DWORD(0);
}
else {
@@ -3749,7 +3735,7 @@ _EmitSib:
}
else {
// RIP->Absolute [x86 mode].
relocId = self->_relocList.getLength();
relocId = self->_relocations.getLength();
RelocData rd;
rd.type = kRelocRelToAbs;
@@ -3757,7 +3743,7 @@ _EmitSib:
rd.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer));
rd.data = rd.from + static_cast<SignedPtr>(dispOffset);
if (self->_relocList.append(rd) != kErrorOk)
if (self->_relocations.append(rd) != kErrorOk)
return self->setLastError(kErrorNoHeapMemory);
EMIT_DWORD(0);
@@ -3843,8 +3829,8 @@ _EmitFpuOp:
EMIT_PP(opCode);
// Instruction opcodes.
EMIT_OP(opCode >> 8);
EMIT_OP(opCode);
EMIT_BYTE(opCode >> 8);
EMIT_BYTE(opCode);
goto _EmitDone;
// --------------------------------------------------------------------------
@@ -3853,7 +3839,7 @@ _EmitFpuOp:
#define EMIT_AVX_M \
ASMJIT_ASSERT(rmMem != nullptr); \
ASMJIT_ASSERT(rmMem->getOp() == kOperandTypeMem); \
ASMJIT_ASSERT(rmMem->getOp() == Operand::kTypeMem); \
\
if (rmMem->hasSegment()) { \
EMIT_BYTE(x86SegmentPrefix[rmMem->getSegment()]); \
@@ -3883,7 +3869,7 @@ _EmitFpuOp:
EMIT_BYTE(kX86ByteVex3); \
EMIT_BYTE(vex_rxbmmmmm); \
EMIT_BYTE(vex_XvvvvLpp); \
EMIT_OP(opCode); \
EMIT_BYTE(opCode); \
} \
else { \
vex_XvvvvLpp |= static_cast<uint32_t>(opReg << 4) & 0x80; \
@@ -3891,7 +3877,7 @@ _EmitFpuOp:
\
EMIT_BYTE(kX86ByteVex2); \
EMIT_BYTE(vex_XvvvvLpp); \
EMIT_OP(opCode); \
EMIT_BYTE(opCode); \
} \
} \
\
@@ -3911,14 +3897,14 @@ _EmitAvxOp:
uint32_t vex_rxbmmmmm = (opCode >> kX86InstOpCode_MM_Shift) | 0xE0;
EMIT_BYTE(kX86ByteVex3);
EMIT_OP(vex_rxbmmmmm);
EMIT_OP(vex_XvvvvLpp);
EMIT_OP(opCode);
EMIT_BYTE(vex_rxbmmmmm);
EMIT_BYTE(vex_XvvvvLpp);
EMIT_BYTE(opCode);
}
else {
EMIT_BYTE(kX86ByteVex2);
EMIT_OP(vex_XvvvvLpp);
EMIT_OP(opCode);
EMIT_BYTE(vex_XvvvvLpp);
EMIT_BYTE(opCode);
}
}
goto _EmitDone;
@@ -3942,9 +3928,9 @@ _EmitAvxR:
vex_XvvvvLpp ^= 0x78;
EMIT_BYTE(kX86ByteVex3);
EMIT_OP(vex_rxbmmmmm);
EMIT_OP(vex_XvvvvLpp);
EMIT_OP(opCode);
EMIT_BYTE(vex_rxbmmmmm);
EMIT_BYTE(vex_XvvvvLpp);
EMIT_BYTE(opCode);
rmReg &= 0x07;
}
@@ -3953,8 +3939,8 @@ _EmitAvxR:
vex_XvvvvLpp ^= 0xF8;
EMIT_BYTE(kX86ByteVex2);
EMIT_OP(vex_XvvvvLpp);
EMIT_OP(opCode);
EMIT_BYTE(vex_XvvvvLpp);
EMIT_BYTE(opCode);
}
}
@@ -4014,7 +4000,7 @@ _EmitAvxV:
// Relative->Absolute [x86 mode].
label = self->getLabelData(rmMem->_vmem.base);
relocId = self->_relocList.getLength();
relocId = self->_relocations.getLength();
{
RelocData rd;
@@ -4023,13 +4009,13 @@ _EmitAvxV:
rd.from = static_cast<Ptr>((uintptr_t)(cursor - self->_buffer));
rd.data = static_cast<SignedPtr>(dispOffset);
if (self->_relocList.append(rd) != kErrorOk)
if (self->_relocations.append(rd) != kErrorOk)
return self->setLastError(kErrorNoHeapMemory);
}
if (label->offset != -1) {
// Bound label.
self->_relocList[relocId].data += static_cast<SignedPtr>(label->offset);
self->_relocations[relocId].data += static_cast<SignedPtr>(label->offset);
EMIT_DWORD(0);
}
else {
@@ -4052,7 +4038,7 @@ _EmitAvxV:
#define EMIT_XOP_M \
ASMJIT_ASSERT(rmMem != nullptr); \
ASMJIT_ASSERT(rmMem->getOp() == kOperandTypeMem); \
ASMJIT_ASSERT(rmMem->getOp() == Operand::kTypeMem); \
\
if (rmMem->hasSegment()) { \
EMIT_BYTE(x86SegmentPrefix[rmMem->getSegment()]); \
@@ -4081,7 +4067,7 @@ _EmitAvxV:
EMIT_BYTE(kX86ByteXop3); \
EMIT_BYTE(vex_rxbmmmmm); \
EMIT_BYTE(vex_XvvvvLpp); \
EMIT_OP(opCode); \
EMIT_BYTE(opCode); \
} \
\
mBase &= 0x07; \
@@ -4105,9 +4091,9 @@ _EmitXopR:
xop_XvvvvLpp ^= 0x78;
EMIT_BYTE(kX86ByteXop3);
EMIT_OP(xop_rxbmmmmm);
EMIT_OP(xop_XvvvvLpp);
EMIT_OP(opCode);
EMIT_BYTE(xop_rxbmmmmm);
EMIT_BYTE(xop_XvvvvLpp);
EMIT_BYTE(opCode);
rmReg &= 0x07;
}
@@ -4168,10 +4154,10 @@ _EmitJmpOrCallAbs:
// Both `jmp` and `call` instructions have a single-byte opcode and are
// followed by a 32-bit displacement.
EMIT_OP(opCode);
EMIT_BYTE(opCode);
EMIT_DWORD(0);
if (self->_relocList.append(rd) != kErrorOk)
if (self->_relocations.append(rd) != kErrorOk)
return self->setLastError(kErrorNoHeapMemory);
// Reserve space for a possible trampoline.
@@ -4227,7 +4213,7 @@ _EmitDone:
X86Assembler_dumpInstruction(sb, Arch, code, options, o0, o1, o2, o3, loggerOptions);
if ((loggerOptions & (1 << kLoggerOptionBinaryForm)) != 0)
if ((loggerOptions & Logger::kOptionBinaryForm) != 0)
LogUtil::formatLine(sb, self->_cursor, (intptr_t)(cursor - self->_cursor), dispSize, imLen, self->_comment);
else
LogUtil::formatLine(sb, nullptr, kInvalidIndex, 0, 0, self->_comment);
@@ -4235,7 +4221,7 @@ _EmitDone:
# if defined(ASMJIT_DEBUG)
if (self->_logger)
# endif // ASMJIT_DEBUG
self->_logger->logString(kLoggerStyleDefault, sb.getData(), sb.getLength());
self->_logger->logString(Logger::kStyleDefault, sb.getData(), sb.getLength());
# if defined(ASMJIT_DEBUG)
// This shouldn't happen.
@@ -4257,8 +4243,10 @@ _EmitDone:
Error X86Assembler::_emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) {
#if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64)
ASMJIT_ASSERT(_arch == kArchX86);
return X86Assembler_emit<kArchX86>(this, code, &o0, &o1, &o2, &o3);
#elif !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64)
ASMJIT_ASSERT(_arch == kArchX64);
return X86Assembler_emit<kArchX64>(this, code, &o0, &o1, &o2, &o3);
#else
if (_arch == kArchX86)

View File

@@ -162,10 +162,10 @@ namespace asmjit {
//! a.ret();
//! ~~~
//!
//! You can see that syntax is very close to Intel one. Only difference is that
//! you are calling functions that emit binary code for you. All registers are
//! in `asmjit::x86` namespace, so it's very comfortable to use it (look at the
//! `use namespace` section). Without importing `asmjit::x86` registers would
//! You can see that syntax is very close to the Intel one. Only difference is
//! that you are calling functions that emit binary code for you. All registers
//! are in `asmjit::x86` namespace, so it's very comfortable to use it (look at
//! the `use namespace` section). Without importing `asmjit::x86` registers would
//! have to be written as `x86::eax`, `x86::esp`, and so on.
//!
//! There is also possibility to use memory addresses and immediates. Use
@@ -278,7 +278,7 @@ namespace asmjit {
//! code with labels. Labels are fully supported and you can call `jmp` or
//! `je` (and similar) instructions to initialized or yet uninitialized label.
//! Each label expects to be bound into offset. To bind label to specific
//! offset, use `CodeGen::bind()` method.
//! offset, use `Assembler::bind()` function.
//!
//! See next example that contains complete code that creates simple memory
//! copy function (in DWord entities).
@@ -292,14 +292,14 @@ namespace asmjit {
//!
//! // Assembler instance.
//! JitRuntime runtime;
//! Assembler a(&runtime);
//! X86Assembler a(&runtime);
//!
//! // Constants.
//! const int arg_offset = 8; // Arguments offset (STDCALL EBP).
//! const int arg_size = 12; // Arguments size.
//!
//! // Labels.
//! Label L_Loop(a);
//! Label L_Loop = a.newLabel();
//!
//! // Prolog.
//! a.push(ebp);
@@ -491,13 +491,13 @@ struct ASMJIT_VIRTAPI X86Assembler : public Assembler {
// [Align]
// --------------------------------------------------------------------------
ASMJIT_API virtual Error align(uint32_t alignMode, uint32_t offset);
ASMJIT_API virtual Error align(uint32_t alignMode, uint32_t offset) noexcept;
// --------------------------------------------------------------------------
// [Reloc]
// --------------------------------------------------------------------------
ASMJIT_API virtual size_t _relocCode(void* dst, Ptr baseAddress) const;
ASMJIT_API virtual size_t _relocCode(void* dst, Ptr baseAddress) const noexcept;
// --------------------------------------------------------------------------
// [Emit]
@@ -539,164 +539,164 @@ struct ASMJIT_VIRTAPI X86Assembler : public Assembler {
// [Emit]
// --------------------------------------------------------------------------
#define INST_0x(_Inst_, _Code_) \
ASMJIT_INLINE Error _Inst_() { \
return emit(_Code_); \
#define INST_0x(inst, code) \
ASMJIT_INLINE Error inst() { \
return emit(code); \
}
#define INST_1x(_Inst_, _Code_, _Op0_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0) { \
return emit(_Code_, o0); \
#define INST_1x(inst, code, T0) \
ASMJIT_INLINE Error inst(const T0& o0) { \
return emit(code, o0); \
}
#define INST_1i(_Inst_, _Code_, _Op0_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0) { return emit(_Code_, o0); } \
#define INST_1i(inst, code, T0) \
ASMJIT_INLINE Error inst(const T0& o0) { return emit(code, o0); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(int o0) { return emit(_Code_, Utils::asInt(o0)); } \
ASMJIT_INLINE Error inst(int o0) { return emit(code, Utils::asInt(o0)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(unsigned int o0) { return emit(_Code_, Utils::asInt(o0)); } \
ASMJIT_INLINE Error inst(unsigned int o0) { return emit(code, Utils::asInt(o0)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(int64_t o0) { return emit(_Code_, Utils::asInt(o0)); } \
ASMJIT_INLINE Error inst(int64_t o0) { return emit(code, Utils::asInt(o0)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(uint64_t o0) { return emit(_Code_, Utils::asInt(o0)); }
ASMJIT_INLINE Error inst(uint64_t o0) { return emit(code, Utils::asInt(o0)); }
#define INST_1cc(_Inst_, _Code_, _Translate_, _Op0_) \
ASMJIT_INLINE Error _Inst_(uint32_t cc, const _Op0_& o0) { \
#define INST_1cc(inst, code, _Translate_, T0) \
ASMJIT_INLINE Error inst(uint32_t cc, const T0& o0) { \
return emit(_Translate_(cc), o0); \
} \
\
ASMJIT_INLINE Error _Inst_##a(const _Op0_& o0) { return emit(_Code_##a, o0); } \
ASMJIT_INLINE Error _Inst_##ae(const _Op0_& o0) { return emit(_Code_##ae, o0); } \
ASMJIT_INLINE Error _Inst_##b(const _Op0_& o0) { return emit(_Code_##b, o0); } \
ASMJIT_INLINE Error _Inst_##be(const _Op0_& o0) { return emit(_Code_##be, o0); } \
ASMJIT_INLINE Error _Inst_##c(const _Op0_& o0) { return emit(_Code_##c, o0); } \
ASMJIT_INLINE Error _Inst_##e(const _Op0_& o0) { return emit(_Code_##e, o0); } \
ASMJIT_INLINE Error _Inst_##g(const _Op0_& o0) { return emit(_Code_##g, o0); } \
ASMJIT_INLINE Error _Inst_##ge(const _Op0_& o0) { return emit(_Code_##ge, o0); } \
ASMJIT_INLINE Error _Inst_##l(const _Op0_& o0) { return emit(_Code_##l, o0); } \
ASMJIT_INLINE Error _Inst_##le(const _Op0_& o0) { return emit(_Code_##le, o0); } \
ASMJIT_INLINE Error _Inst_##na(const _Op0_& o0) { return emit(_Code_##na, o0); } \
ASMJIT_INLINE Error _Inst_##nae(const _Op0_& o0) { return emit(_Code_##nae, o0); } \
ASMJIT_INLINE Error _Inst_##nb(const _Op0_& o0) { return emit(_Code_##nb, o0); } \
ASMJIT_INLINE Error _Inst_##nbe(const _Op0_& o0) { return emit(_Code_##nbe, o0); } \
ASMJIT_INLINE Error _Inst_##nc(const _Op0_& o0) { return emit(_Code_##nc, o0); } \
ASMJIT_INLINE Error _Inst_##ne(const _Op0_& o0) { return emit(_Code_##ne, o0); } \
ASMJIT_INLINE Error _Inst_##ng(const _Op0_& o0) { return emit(_Code_##ng, o0); } \
ASMJIT_INLINE Error _Inst_##nge(const _Op0_& o0) { return emit(_Code_##nge, o0); } \
ASMJIT_INLINE Error _Inst_##nl(const _Op0_& o0) { return emit(_Code_##nl, o0); } \
ASMJIT_INLINE Error _Inst_##nle(const _Op0_& o0) { return emit(_Code_##nle, o0); } \
ASMJIT_INLINE Error _Inst_##no(const _Op0_& o0) { return emit(_Code_##no, o0); } \
ASMJIT_INLINE Error _Inst_##np(const _Op0_& o0) { return emit(_Code_##np, o0); } \
ASMJIT_INLINE Error _Inst_##ns(const _Op0_& o0) { return emit(_Code_##ns, o0); } \
ASMJIT_INLINE Error _Inst_##nz(const _Op0_& o0) { return emit(_Code_##nz, o0); } \
ASMJIT_INLINE Error _Inst_##o(const _Op0_& o0) { return emit(_Code_##o, o0); } \
ASMJIT_INLINE Error _Inst_##p(const _Op0_& o0) { return emit(_Code_##p, o0); } \
ASMJIT_INLINE Error _Inst_##pe(const _Op0_& o0) { return emit(_Code_##pe, o0); } \
ASMJIT_INLINE Error _Inst_##po(const _Op0_& o0) { return emit(_Code_##po, o0); } \
ASMJIT_INLINE Error _Inst_##s(const _Op0_& o0) { return emit(_Code_##s, o0); } \
ASMJIT_INLINE Error _Inst_##z(const _Op0_& o0) { return emit(_Code_##z, o0); }
ASMJIT_INLINE Error inst##a(const T0& o0) { return emit(code##a, o0); } \
ASMJIT_INLINE Error inst##ae(const T0& o0) { return emit(code##ae, o0); } \
ASMJIT_INLINE Error inst##b(const T0& o0) { return emit(code##b, o0); } \
ASMJIT_INLINE Error inst##be(const T0& o0) { return emit(code##be, o0); } \
ASMJIT_INLINE Error inst##c(const T0& o0) { return emit(code##c, o0); } \
ASMJIT_INLINE Error inst##e(const T0& o0) { return emit(code##e, o0); } \
ASMJIT_INLINE Error inst##g(const T0& o0) { return emit(code##g, o0); } \
ASMJIT_INLINE Error inst##ge(const T0& o0) { return emit(code##ge, o0); } \
ASMJIT_INLINE Error inst##l(const T0& o0) { return emit(code##l, o0); } \
ASMJIT_INLINE Error inst##le(const T0& o0) { return emit(code##le, o0); } \
ASMJIT_INLINE Error inst##na(const T0& o0) { return emit(code##na, o0); } \
ASMJIT_INLINE Error inst##nae(const T0& o0) { return emit(code##nae, o0); } \
ASMJIT_INLINE Error inst##nb(const T0& o0) { return emit(code##nb, o0); } \
ASMJIT_INLINE Error inst##nbe(const T0& o0) { return emit(code##nbe, o0); } \
ASMJIT_INLINE Error inst##nc(const T0& o0) { return emit(code##nc, o0); } \
ASMJIT_INLINE Error inst##ne(const T0& o0) { return emit(code##ne, o0); } \
ASMJIT_INLINE Error inst##ng(const T0& o0) { return emit(code##ng, o0); } \
ASMJIT_INLINE Error inst##nge(const T0& o0) { return emit(code##nge, o0); } \
ASMJIT_INLINE Error inst##nl(const T0& o0) { return emit(code##nl, o0); } \
ASMJIT_INLINE Error inst##nle(const T0& o0) { return emit(code##nle, o0); } \
ASMJIT_INLINE Error inst##no(const T0& o0) { return emit(code##no, o0); } \
ASMJIT_INLINE Error inst##np(const T0& o0) { return emit(code##np, o0); } \
ASMJIT_INLINE Error inst##ns(const T0& o0) { return emit(code##ns, o0); } \
ASMJIT_INLINE Error inst##nz(const T0& o0) { return emit(code##nz, o0); } \
ASMJIT_INLINE Error inst##o(const T0& o0) { return emit(code##o, o0); } \
ASMJIT_INLINE Error inst##p(const T0& o0) { return emit(code##p, o0); } \
ASMJIT_INLINE Error inst##pe(const T0& o0) { return emit(code##pe, o0); } \
ASMJIT_INLINE Error inst##po(const T0& o0) { return emit(code##po, o0); } \
ASMJIT_INLINE Error inst##s(const T0& o0) { return emit(code##s, o0); } \
ASMJIT_INLINE Error inst##z(const T0& o0) { return emit(code##z, o0); }
#define INST_2x(_Inst_, _Code_, _Op0_, _Op1_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1) { \
return emit(_Code_, o0, o1); \
#define INST_2x(inst, code, T0, T1) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1) { \
return emit(code, o0, o1); \
}
#define INST_2i(_Inst_, _Code_, _Op0_, _Op1_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_, o0, o1); } \
#define INST_2i(inst, code, T0, T1) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1) { return emit(code, o0, o1); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \
ASMJIT_INLINE Error inst(const T0& o0, int o1) { return emit(code, o0, Utils::asInt(o1)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \
ASMJIT_INLINE Error inst(const T0& o0, unsigned int o1) { return emit(code, o0, Utils::asInt(o1)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \
ASMJIT_INLINE Error inst(const T0& o0, int64_t o1) { return emit(code, o0, Utils::asInt(o1)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1) { return emit(_Code_, o0, Utils::asInt(o1)); }
ASMJIT_INLINE Error inst(const T0& o0, uint64_t o1) { return emit(code, o0, Utils::asInt(o1)); }
#define INST_2cc(_Inst_, _Code_, _Translate_, _Op0_, _Op1_) \
ASMJIT_INLINE Error _Inst_(uint32_t cc, const _Op0_& o0, const _Op1_& o1) { \
#define INST_2cc(inst, code, _Translate_, T0, T1) \
ASMJIT_INLINE Error inst(uint32_t cc, const T0& o0, const T1& o1) { \
return emit(_Translate_(cc), o0, o1); \
} \
\
ASMJIT_INLINE Error _Inst_##a(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##a, o0, o1); } \
ASMJIT_INLINE Error _Inst_##ae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ae, o0, o1); } \
ASMJIT_INLINE Error _Inst_##b(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##b, o0, o1); } \
ASMJIT_INLINE Error _Inst_##be(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##be, o0, o1); } \
ASMJIT_INLINE Error _Inst_##c(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##c, o0, o1); } \
ASMJIT_INLINE Error _Inst_##e(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##e, o0, o1); } \
ASMJIT_INLINE Error _Inst_##g(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##g, o0, o1); } \
ASMJIT_INLINE Error _Inst_##ge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ge, o0, o1); } \
ASMJIT_INLINE Error _Inst_##l(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##l, o0, o1); } \
ASMJIT_INLINE Error _Inst_##le(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##le, o0, o1); } \
ASMJIT_INLINE Error _Inst_##na(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##na, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nae, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nb(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nb, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nbe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nbe, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nc(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nc, o0, o1); } \
ASMJIT_INLINE Error _Inst_##ne(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ne, o0, o1); } \
ASMJIT_INLINE Error _Inst_##ng(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ng, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nge, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nl(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nl, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nle(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nle, o0, o1); } \
ASMJIT_INLINE Error _Inst_##no(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##no, o0, o1); } \
ASMJIT_INLINE Error _Inst_##np(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##np, o0, o1); } \
ASMJIT_INLINE Error _Inst_##ns(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ns, o0, o1); } \
ASMJIT_INLINE Error _Inst_##nz(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nz, o0, o1); } \
ASMJIT_INLINE Error _Inst_##o(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##o, o0, o1); } \
ASMJIT_INLINE Error _Inst_##p(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##p, o0, o1); } \
ASMJIT_INLINE Error _Inst_##pe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##pe, o0, o1); } \
ASMJIT_INLINE Error _Inst_##po(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##po, o0, o1); } \
ASMJIT_INLINE Error _Inst_##s(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##s, o0, o1); } \
ASMJIT_INLINE Error _Inst_##z(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##z, o0, o1); }
ASMJIT_INLINE Error inst##a(const T0& o0, const T1& o1) { return emit(code##a, o0, o1); } \
ASMJIT_INLINE Error inst##ae(const T0& o0, const T1& o1) { return emit(code##ae, o0, o1); } \
ASMJIT_INLINE Error inst##b(const T0& o0, const T1& o1) { return emit(code##b, o0, o1); } \
ASMJIT_INLINE Error inst##be(const T0& o0, const T1& o1) { return emit(code##be, o0, o1); } \
ASMJIT_INLINE Error inst##c(const T0& o0, const T1& o1) { return emit(code##c, o0, o1); } \
ASMJIT_INLINE Error inst##e(const T0& o0, const T1& o1) { return emit(code##e, o0, o1); } \
ASMJIT_INLINE Error inst##g(const T0& o0, const T1& o1) { return emit(code##g, o0, o1); } \
ASMJIT_INLINE Error inst##ge(const T0& o0, const T1& o1) { return emit(code##ge, o0, o1); } \
ASMJIT_INLINE Error inst##l(const T0& o0, const T1& o1) { return emit(code##l, o0, o1); } \
ASMJIT_INLINE Error inst##le(const T0& o0, const T1& o1) { return emit(code##le, o0, o1); } \
ASMJIT_INLINE Error inst##na(const T0& o0, const T1& o1) { return emit(code##na, o0, o1); } \
ASMJIT_INLINE Error inst##nae(const T0& o0, const T1& o1) { return emit(code##nae, o0, o1); } \
ASMJIT_INLINE Error inst##nb(const T0& o0, const T1& o1) { return emit(code##nb, o0, o1); } \
ASMJIT_INLINE Error inst##nbe(const T0& o0, const T1& o1) { return emit(code##nbe, o0, o1); } \
ASMJIT_INLINE Error inst##nc(const T0& o0, const T1& o1) { return emit(code##nc, o0, o1); } \
ASMJIT_INLINE Error inst##ne(const T0& o0, const T1& o1) { return emit(code##ne, o0, o1); } \
ASMJIT_INLINE Error inst##ng(const T0& o0, const T1& o1) { return emit(code##ng, o0, o1); } \
ASMJIT_INLINE Error inst##nge(const T0& o0, const T1& o1) { return emit(code##nge, o0, o1); } \
ASMJIT_INLINE Error inst##nl(const T0& o0, const T1& o1) { return emit(code##nl, o0, o1); } \
ASMJIT_INLINE Error inst##nle(const T0& o0, const T1& o1) { return emit(code##nle, o0, o1); } \
ASMJIT_INLINE Error inst##no(const T0& o0, const T1& o1) { return emit(code##no, o0, o1); } \
ASMJIT_INLINE Error inst##np(const T0& o0, const T1& o1) { return emit(code##np, o0, o1); } \
ASMJIT_INLINE Error inst##ns(const T0& o0, const T1& o1) { return emit(code##ns, o0, o1); } \
ASMJIT_INLINE Error inst##nz(const T0& o0, const T1& o1) { return emit(code##nz, o0, o1); } \
ASMJIT_INLINE Error inst##o(const T0& o0, const T1& o1) { return emit(code##o, o0, o1); } \
ASMJIT_INLINE Error inst##p(const T0& o0, const T1& o1) { return emit(code##p, o0, o1); } \
ASMJIT_INLINE Error inst##pe(const T0& o0, const T1& o1) { return emit(code##pe, o0, o1); } \
ASMJIT_INLINE Error inst##po(const T0& o0, const T1& o1) { return emit(code##po, o0, o1); } \
ASMJIT_INLINE Error inst##s(const T0& o0, const T1& o1) { return emit(code##s, o0, o1); } \
ASMJIT_INLINE Error inst##z(const T0& o0, const T1& o1) { return emit(code##z, o0, o1); }
#define INST_3x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); }
#define INST_3x(inst, code, T0, T1, T2) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2) { return emit(code, o0, o1, o2); }
#define INST_3i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); } \
#define INST_3i(inst, code, T0, T1, T2) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2) { return emit(code, o0, o1, o2); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int o2) { return emit(code, o0, o1, Utils::asInt(o2)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, unsigned int o2) { return emit(code, o0, o1, Utils::asInt(o2)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int64_t o2) { return emit(code, o0, o1, Utils::asInt(o2)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); }
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, uint64_t o2) { return emit(code, o0, o1, Utils::asInt(o2)); }
#define INST_3ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); } \
#define INST_3ii(inst, code, T0, T1, T2) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2) { return emit(code, o0, o1, o2); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1, int o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \
ASMJIT_INLINE Error inst(const T0& o0, int o1, int o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \
ASMJIT_INLINE Error inst(const T0& o0, unsigned int o1, unsigned int o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \
ASMJIT_INLINE Error inst(const T0& o0, int64_t o1, int64_t o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); }
ASMJIT_INLINE Error inst(const T0& o0, uint64_t o1, uint64_t o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); }
#define INST_4x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); }
#define INST_4x(inst, code, T0, T1, T2, T3) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return emit(code, o0, o1, o2, o3); }
#define INST_4i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } \
#define INST_4i(inst, code, T0, T1, T2, T3) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return emit(code, o0, o1, o2, o3); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, int o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, unsigned int o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, int64_t o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); }
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, uint64_t o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); }
#define INST_4ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } \
#define INST_4ii(inst, code, T0, T1, T2, T3) \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return emit(code, o0, o1, o2, o3); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int o2, int o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, unsigned int o2, unsigned int o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int64_t o2, int64_t o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); } \
/*! \overload */ \
ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); }
ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, uint64_t o2, uint64_t o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); }
// --------------------------------------------------------------------------
// [X86/X64]

View File

@@ -43,39 +43,35 @@ namespace asmjit {
// [asmjit::X86VarInfo]
// ============================================================================
#define C(_Class_) kX86RegClass##_Class_
#define D(_Desc_) kVarFlag##_Desc_
const X86VarInfo _x86VarInfo[] = {
/* 00: kVarTypeInt8 */ { kX86RegTypeGpbLo, 1 , C(Gp) , 0 , "gpb" },
/* 01: kVarTypeUInt8 */ { kX86RegTypeGpbLo, 1 , C(Gp) , 0 , "gpb" },
/* 02: kVarTypeInt16 */ { kX86RegTypeGpw , 2 , C(Gp) , 0 , "gpw" },
/* 03: kVarTypeUInt16 */ { kX86RegTypeGpw , 2 , C(Gp) , 0 , "gpw" },
/* 04: kVarTypeInt32 */ { kX86RegTypeGpd , 4 , C(Gp) , 0 , "gpd" },
/* 05: kVarTypeUInt32 */ { kX86RegTypeGpd , 4 , C(Gp) , 0 , "gpd" },
/* 06: kVarTypeInt64 */ { kX86RegTypeGpq , 8 , C(Gp) , 0 , "gpq" },
/* 07: kVarTypeUInt64 */ { kX86RegTypeGpq , 8 , C(Gp) , 0 , "gpq" },
/* 08: kVarTypeIntPtr */ { 0 , 0 , C(Gp) , 0 , "" }, // Remapped.
/* 09: kVarTypeUIntPtr */ { 0 , 0 , C(Gp) , 0 , "" }, // Remapped.
/* 10: kVarTypeFp32 */ { kX86RegTypeFp , 4 , C(Fp) , D(Sp) , "fp" },
/* 11: kVarTypeFp64 */ { kX86RegTypeFp , 8 , C(Fp) , D(Dp) , "fp" },
/* 12: kX86VarTypeMm */ { kX86RegTypeMm , 8 , C(Mm) , 0 , "mm" },
/* 13: kX86VarTypeK */ { kX86RegTypeK , 8 , C(K) , 0 , "k" },
/* 14: kX86VarTypeXmm */ { kX86RegTypeXmm , 16, C(Xyz), 0 , "xmm" },
/* 15: kX86VarTypeXmmSs */ { kX86RegTypeXmm , 4 , C(Xyz), D(Sp) , "xmm" },
/* 16: kX86VarTypeXmmPs */ { kX86RegTypeXmm , 16, C(Xyz), D(Sp) | D(Packed), "xmm" },
/* 17: kX86VarTypeXmmSd */ { kX86RegTypeXmm , 8 , C(Xyz), D(Dp) , "xmm" },
/* 18: kX86VarTypeXmmPd */ { kX86RegTypeXmm , 16, C(Xyz), D(Dp) | D(Packed), "xmm" },
/* 19: kX86VarTypeYmm */ { kX86RegTypeYmm , 32, C(Xyz), 0 , "ymm" },
/* 20: kX86VarTypeYmmPs */ { kX86RegTypeYmm , 32, C(Xyz), D(Sp) | D(Packed), "ymm" },
/* 21: kX86VarTypeYmmPd */ { kX86RegTypeYmm , 32, C(Xyz), D(Dp) | D(Packed), "ymm" },
/* 22: kX86VarTypeZmm */ { kX86RegTypeZmm , 64, C(Xyz), 0 , "zmm" },
/* 23: kX86VarTypeZmmPs */ { kX86RegTypeZmm , 64, C(Xyz), D(Sp) | D(Packed), "zmm" },
/* 24: kX86VarTypeZmmPd */ { kX86RegTypeZmm , 64, C(Xyz), D(Dp) | D(Packed), "zmm" }
#define F(flag) VarInfo::kFlag##flag
const VarInfo _x86VarInfo[] = {
{ kVarTypeInt8 , 1 , kX86RegClassGp , kX86RegTypeGpbLo, 0 , "gpb" },
{ kVarTypeUInt8 , 1 , kX86RegClassGp , kX86RegTypeGpbLo, 0 , "gpb" },
{ kVarTypeInt16 , 2 , kX86RegClassGp , kX86RegTypeGpw , 0 , "gpw" },
{ kVarTypeUInt16 , 2 , kX86RegClassGp , kX86RegTypeGpw , 0 , "gpw" },
{ kVarTypeInt32 , 4 , kX86RegClassGp , kX86RegTypeGpd , 0 , "gpd" },
{ kVarTypeUInt32 , 4 , kX86RegClassGp , kX86RegTypeGpd , 0 , "gpd" },
{ kVarTypeInt64 , 8 , kX86RegClassGp , kX86RegTypeGpq , 0 , "gpq" },
{ kVarTypeUInt64 , 8 , kX86RegClassGp , kX86RegTypeGpq , 0 , "gpq" },
{ kVarTypeIntPtr , 0 , kX86RegClassGp , 0 , 0 , "" }, // Abstract.
{ kVarTypeUIntPtr , 0 , kX86RegClassGp , 0 , 0 , "" }, // Abstract.
{ kVarTypeFp32 , 4 , kX86RegClassFp , kX86RegTypeFp , F(SP) , "fp" },
{ kVarTypeFp64 , 8 , kX86RegClassFp , kX86RegTypeFp , F(DP) , "fp" },
{ kX86VarTypeMm , 8 , kX86RegClassMm , kX86RegTypeMm , 0 | F(SIMD), "mm" },
{ kX86VarTypeK , 8 , kX86RegClassK , kX86RegTypeK , 0 , "k" },
{ kX86VarTypeXmm , 16, kX86RegClassXyz, kX86RegTypeXmm , 0 | F(SIMD), "xmm" },
{ kX86VarTypeXmmSs, 4 , kX86RegClassXyz, kX86RegTypeXmm , F(SP) , "xmm" },
{ kX86VarTypeXmmPs, 16, kX86RegClassXyz, kX86RegTypeXmm , F(SP) | F(SIMD), "xmm" },
{ kX86VarTypeXmmSd, 8 , kX86RegClassXyz, kX86RegTypeXmm , F(DP) , "xmm" },
{ kX86VarTypeXmmPd, 16, kX86RegClassXyz, kX86RegTypeXmm , F(DP) | F(SIMD), "xmm" },
{ kX86VarTypeYmm , 32, kX86RegClassXyz, kX86RegTypeYmm , 0 | F(SIMD), "ymm" },
{ kX86VarTypeYmmPs, 32, kX86RegClassXyz, kX86RegTypeYmm , F(SP) | F(SIMD), "ymm" },
{ kX86VarTypeYmmPd, 32, kX86RegClassXyz, kX86RegTypeYmm , F(DP) | F(SIMD), "ymm" },
{ kX86VarTypeZmm , 64, kX86RegClassXyz, kX86RegTypeZmm , 0 | F(SIMD), "zmm" },
{ kX86VarTypeZmmPs, 64, kX86RegClassXyz, kX86RegTypeZmm , F(SP) | F(SIMD), "zmm" },
{ kX86VarTypeZmmPd, 64, kX86RegClassXyz, kX86RegTypeZmm , F(DP) | F(SIMD), "zmm" }
};
#undef D
#undef C
#undef F
#if defined(ASMJIT_BUILD_X86)
const uint8_t _x86VarMapping[kX86VarTypeCount] = {
@@ -141,7 +137,7 @@ const uint8_t _x64VarMapping[kX86VarTypeCount] = {
// [asmjit::X86CallNode - Arg / Ret]
// ============================================================================
bool X86CallNode::_setArg(uint32_t i, const Operand& op) {
bool X86CallNode::_setArg(uint32_t i, const Operand& op) noexcept {
if ((i & ~kFuncArgHi) >= _x86Decl.getNumArgs())
return false;
@@ -149,7 +145,7 @@ bool X86CallNode::_setArg(uint32_t i, const Operand& op) {
return true;
}
bool X86CallNode::_setRet(uint32_t i, const Operand& op) {
bool X86CallNode::_setRet(uint32_t i, const Operand& op) noexcept {
if (i >= 2)
return false;
@@ -161,7 +157,7 @@ bool X86CallNode::_setRet(uint32_t i, const Operand& op) {
// [asmjit::X86Compiler - Construction / Destruction]
// ============================================================================
X86Compiler::X86Compiler(X86Assembler* assembler)
X86Compiler::X86Compiler(X86Assembler* assembler) noexcept
: Compiler(),
zax(NoInit),
zcx(NoInit),
@@ -186,7 +182,7 @@ X86Compiler::X86Compiler(X86Assembler* assembler)
attach(assembler);
}
X86Compiler::~X86Compiler() {
X86Compiler::~X86Compiler() noexcept {
reset(true);
}
@@ -194,7 +190,7 @@ X86Compiler::~X86Compiler() {
// [asmjit::X86Compiler - Attach / Reset]
// ============================================================================
Error X86Compiler::attach(Assembler* assembler) {
Error X86Compiler::attach(Assembler* assembler) noexcept {
ASMJIT_ASSERT(assembler != nullptr);
if (_assembler != nullptr)
@@ -224,7 +220,6 @@ Error X86Compiler::attach(Assembler* assembler) {
_regSize = static_cast<uint8_t>(assembler->getRegSize());
_regCount = static_cast<X86Assembler*>(assembler)->getRegCount();
_finalized = false;
_lastError = kErrorOk;
zax = static_cast<X86Assembler*>(assembler)->zax;
zcx = static_cast<X86Assembler*>(assembler)->zcx;
@@ -238,7 +233,7 @@ Error X86Compiler::attach(Assembler* assembler) {
return kErrorOk;
}
void X86Compiler::reset(bool releaseMemory) {
void X86Compiler::reset(bool releaseMemory) noexcept {
Compiler::reset(releaseMemory);
_regCount.reset();
@@ -256,7 +251,7 @@ void X86Compiler::reset(bool releaseMemory) {
// [asmjit::X86Compiler - Finalize]
// ============================================================================
Error X86Compiler::finalize() {
Error X86Compiler::finalize() noexcept {
X86Assembler* assembler = getAssembler();
if (assembler == nullptr)
return kErrorOk;
@@ -283,7 +278,7 @@ Error X86Compiler::finalize() {
start = node;
_resetTokenGenerator();
if (node->getType() == kHLNodeTypeFunc) {
if (node->getType() == HLNode::kTypeFunc) {
node = static_cast<X86FuncNode*>(start)->getEnd();
error = context.compile(static_cast<X86FuncNode*>(start));
@@ -293,7 +288,7 @@ Error X86Compiler::finalize() {
do {
node = node->getNext();
} while (node != nullptr && node->getType() != kHLNodeTypeFunc);
} while (node != nullptr && node->getType() != HLNode::kTypeFunc);
error = context.serialize(assembler, start, node);
context.cleanup();
@@ -311,11 +306,11 @@ Error X86Compiler::finalize() {
// ============================================================================
//! Get compiler instruction item size without operands assigned.
static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) {
static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) noexcept {
return Utils::inInterval<uint32_t>(code, _kX86InstIdJbegin, _kX86InstIdJend) ? sizeof(HLJump) : sizeof(HLInst);
}
static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) {
static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) noexcept {
if (Utils::inInterval<uint32_t>(code, _kX86InstIdJbegin, _kX86InstIdJend)) {
HLJump* node = new(p) HLJump(self, code, options, opList, opCount);
HLLabel* jTarget = nullptr;
@@ -327,7 +322,7 @@ static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, ui
options |= kInstOptionUnfollow;
}
node->orFlags(code == kX86InstIdJmp ? kHLNodeFlagIsJmp | kHLNodeFlagIsTaken : kHLNodeFlagIsJcc);
node->orFlags(code == kX86InstIdJmp ? HLNode::kFlagIsJmp | HLNode::kFlagIsTaken : HLNode::kFlagIsJcc);
node->_target = jTarget;
node->_jumpNext = nullptr;
@@ -339,9 +334,9 @@ static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, ui
// The 'jmp' is always taken, conditional jump can contain hint, we detect it.
if (code == kX86InstIdJmp)
node->orFlags(kHLNodeFlagIsTaken);
node->orFlags(HLNode::kFlagIsTaken);
else if (options & kInstOptionTaken)
node->orFlags(kHLNodeFlagIsTaken);
node->orFlags(HLNode::kFlagIsTaken);
node->addOptions(options);
return node;
@@ -353,7 +348,7 @@ static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, ui
}
}
HLInst* X86Compiler::newInst(uint32_t code) {
HLInst* X86Compiler::newInst(uint32_t code) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size));
@@ -367,7 +362,7 @@ _NoMemory:
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0) {
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 1 * sizeof(Operand)));
@@ -386,7 +381,7 @@ _NoMemory:
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) {
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 2 * sizeof(Operand)));
@@ -407,7 +402,7 @@ _NoMemory:
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) {
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 3 * sizeof(Operand)));
@@ -430,7 +425,7 @@ _NoMemory:
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) {
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 4 * sizeof(Operand)));
@@ -455,7 +450,7 @@ _NoMemory:
return nullptr;
}
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) {
HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept {
size_t size = X86Compiler_getInstSize(code);
HLInst* inst = static_cast<HLInst*>(_zoneAllocator.alloc(size + 5 * sizeof(Operand)));
@@ -482,49 +477,49 @@ _NoMemory:
return nullptr;
}
HLInst* X86Compiler::emit(uint32_t code) {
HLInst* X86Compiler::emit(uint32_t code) noexcept {
HLInst* node = newInst(code);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0) noexcept {
HLInst* node = newInst(code, o0);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1){
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1) noexcept {
HLInst* node = newInst(code, o0, o1);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept {
HLInst* node = newInst(code, o0, o1, o2);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3){
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept {
HLInst* node = newInst(code, o0, o1, o2, o3);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept {
HLInst* node = newInst(code, o0, o1, o2, o3, o4);
if (node == nullptr)
return nullptr;
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, int o0_) {
HLInst* X86Compiler::emit(uint32_t code, int o0_) noexcept {
Imm o0(o0_);
HLInst* node = newInst(code, o0);
if (node == nullptr)
@@ -532,7 +527,7 @@ HLInst* X86Compiler::emit(uint32_t code, int o0_) {
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) {
HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) noexcept {
Imm o0(o0_);
HLInst* node = newInst(code, o0);
if (node == nullptr)
@@ -540,7 +535,7 @@ HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) {
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) noexcept {
Imm o1(o1_);
HLInst* node = newInst(code, o0, o1);
if (node == nullptr)
@@ -548,7 +543,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) {
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) noexcept {
Imm o1(o1_);
HLInst* node = newInst(code, o0, o1);
if (node == nullptr)
@@ -556,7 +551,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) {
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2_) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2_) noexcept {
Imm o2(o2_);
HLInst* node = newInst(code, o0, o1, o2);
if (node == nullptr)
@@ -564,7 +559,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, i
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2_) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2_) noexcept {
Imm o2(o2_);
HLInst* node = newInst(code, o0, o1, o2);
if (node == nullptr)
@@ -572,7 +567,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, u
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3_) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3_) noexcept {
Imm o3(o3_);
HLInst* node = newInst(code, o0, o1, o2, o3);
if (node == nullptr)
@@ -580,7 +575,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, c
return static_cast<HLInst*>(addNode(node));
}
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3_) {
HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3_) noexcept {
Imm o3(o3_);
HLInst* node = newInst(code, o0, o1, o2, o3);
if (node == nullptr)
@@ -592,7 +587,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, c
// [asmjit::X86Compiler - Func]
// ============================================================================
X86FuncNode* X86Compiler::newFunc(const FuncPrototype& p) {
X86FuncNode* X86Compiler::newFunc(const FuncPrototype& p) noexcept {
X86FuncNode* func = newNode<X86FuncNode>();
Error error;
@@ -639,7 +634,7 @@ _NoMemory:
return nullptr;
}
X86FuncNode* X86Compiler::addFunc(const FuncPrototype& p) {
X86FuncNode* X86Compiler::addFunc(const FuncPrototype& p) noexcept {
X86FuncNode* func = newFunc(p);
if (func == nullptr) {
@@ -661,7 +656,7 @@ X86FuncNode* X86Compiler::addFunc(const FuncPrototype& p) {
return func;
}
HLSentinel* X86Compiler::endFunc() {
HLSentinel* X86Compiler::endFunc() noexcept {
X86FuncNode* func = getFunc();
ASMJIT_ASSERT(func != nullptr);
@@ -686,7 +681,7 @@ HLSentinel* X86Compiler::endFunc() {
// [asmjit::X86Compiler - Ret]
// ============================================================================
HLRet* X86Compiler::newRet(const Operand& o0, const Operand& o1) {
HLRet* X86Compiler::newRet(const Operand& o0, const Operand& o1) noexcept {
HLRet* node = newNode<HLRet>(o0, o1);
if (node == nullptr)
goto _NoMemory;
@@ -697,7 +692,7 @@ _NoMemory:
return nullptr;
}
HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) {
HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) noexcept {
HLRet* node = newRet(o0, o1);
if (node == nullptr)
return node;
@@ -708,7 +703,7 @@ HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) {
// [asmjit::X86Compiler - Call]
// ============================================================================
X86CallNode* X86Compiler::newCall(const Operand& o0, const FuncPrototype& p) {
X86CallNode* X86Compiler::newCall(const Operand& o0, const FuncPrototype& p) noexcept {
X86CallNode* node = newNode<X86CallNode>(o0);
Error error;
uint32_t nArgs;
@@ -737,7 +732,7 @@ _NoMemory:
return nullptr;
}
X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) {
X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) noexcept {
X86CallNode* node = newCall(o0, p);
if (node == nullptr)
return nullptr;
@@ -748,7 +743,7 @@ X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) {
// [asmjit::X86Compiler - Vars]
// ============================================================================
Error X86Compiler::setArg(uint32_t argIndex, const Var& var) {
Error X86Compiler::setArg(uint32_t argIndex, const Var& var) noexcept {
X86FuncNode* func = getFunc();
if (func == nullptr)
@@ -763,7 +758,7 @@ Error X86Compiler::setArg(uint32_t argIndex, const Var& var) {
return kErrorOk;
}
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name, va_list ap) {
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name) noexcept {
ASMJIT_ASSERT(vType < kX86VarTypeCount);
vType = _targetVarMapping[vType];
ASMJIT_ASSERT(vType != kInvalidVar);
@@ -775,44 +770,47 @@ Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name, va_list a
return kErrorInvalidArgument;
}
const X86VarInfo& vInfo = _x86VarInfo[vType];
char buf[64];
const VarInfo& vInfo = _x86VarInfo[vType];
VarData* vd = _newVd(vInfo, name);
// Format the name if `ap` is given.
if (ap) {
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), name, ap);
buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0';
name = buf;
}
VarData* vd = _newVd(vType, vInfo.getSize(), vInfo.getClass(), name);
if (vd == nullptr) {
static_cast<X86Var*>(var)->reset();
return getLastError();
}
var->_init_packed_op_sz_w0_id(kOperandTypeVar, vInfo.getSize(), vInfo.getReg() << 8, vd->getId());
var->_init_packed_op_sz_w0_id(Operand::kTypeVar, vInfo.getSize(), vInfo.getRegType() << 8, vd->getId());
var->_vreg.vType = vType;
return kErrorOk;
}
Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* fmt, va_list ap) noexcept {
char name[64];
vsnprintf(name, ASMJIT_ARRAY_SIZE(name), fmt, ap);
name[ASMJIT_ARRAY_SIZE(name) - 1] = '\0';
return _newVar(var, vType, name);
}
// ============================================================================
// [asmjit::X86Compiler - Stack]
// ============================================================================
Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) {
Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept {
if (size == 0)
return kErrorInvalidArgument;
if (alignment > 64)
alignment = 64;
VarData* vd = _newVd(kInvalidVar, size, kInvalidReg, name);
VarInfo vi = { kInvalidVar, 0, kInvalidReg , kInvalidReg, 0, "" };
VarData* vd = _newVd(vi, name);
if (vd == nullptr) {
static_cast<X86Mem*>(mem)->reset();
return getLastError();
}
vd->_size = size;
vd->_isStack = true;
vd->_alignment = static_cast<uint8_t>(alignment);
@@ -824,7 +822,7 @@ Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, co
// [asmjit::X86Compiler - Const]
// ============================================================================
Error X86Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) {
Error X86Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept {
Error error = kErrorOk;
size_t offset;

File diff suppressed because it is too large Load Diff

View File

@@ -13,12 +13,11 @@
// [Dependencies - AsmJit]
#include "../base/containers.h"
#include "../base/cpuinfo.h"
#include "../base/utils.h"
#include "../x86/x86assembler.h"
#include "../x86/x86compiler.h"
#include "../x86/x86compilercontext_p.h"
#include "../x86/x86cpuinfo.h"
#include "../x86/x86scheduler_p.h"
// [Api-Begin]
#include "../apibegin.h"
@@ -38,9 +37,9 @@ static Error X86Context_translateOperands(X86Context* self, Operand* opList, uin
// Getting `VarClass` is the only safe operation when dealing with denormalized
// `varType`. Any other property would require to map vType to the architecture
// specific type.
static ASMJIT_INLINE uint32_t x86VarTypeToClass(uint32_t vType) {
static ASMJIT_INLINE uint32_t x86VarTypeToClass(uint32_t vType) noexcept {
ASMJIT_ASSERT(vType < kX86VarTypeCount);
return _x86VarInfo[vType].getClass();
return _x86VarInfo[vType].getRegClass();
}
// ============================================================================
@@ -58,7 +57,7 @@ static void X86Context_annotateVariable(X86Context* self,
}
else {
sb.appendChar('v');
sb.appendUInt(vd->getId() & kOperandIdNum);
sb.appendUInt(vd->getId() & Operand::kIdIndexMask);
}
}
@@ -114,7 +113,7 @@ static void X86Context_annotateOperand(X86Context* self,
sb.appendChar(prefix);
// TODO: Enable again:
// if ((loggerOptions & (1 << kLoggerOptionHexDisplacement)) != 0 && dispOffset > 9) {
// if ((loggerOptions & (Logger::kOptionHexDisplacement)) != 0 && dispOffset > 9) {
// sb.appendString("0x", 2);
// base = 16;
// }
@@ -128,7 +127,7 @@ static void X86Context_annotateOperand(X86Context* self,
int64_t val = i->getInt64();
/*
if ((loggerOptions & (1 << kLoggerOptionHexImmediate)) && static_cast<uint64_t>(val) > 9)
if ((loggerOptions & (1 << Logger::kOptionHexImmediate)) && static_cast<uint64_t>(val) > 9)
sb.appendUInt(static_cast<uint64_t>(val), 16);
else*/
sb.appendInt(val, 10);
@@ -161,7 +160,7 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr
StringBuilderTmp<256> sb;
switch (node_->getType()) {
case kHLNodeTypeAlign: {
case HLNode::kTypeAlign: {
HLAlign* node = static_cast<HLAlign*>(node_);
sb.appendFormat(".align %u (%s)",
node->getOffset(),
@@ -169,19 +168,19 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr
break;
}
case kHLNodeTypeData: {
case HLNode::kTypeData: {
HLData* node = static_cast<HLData*>(node_);
sb.appendFormat(".embed (%u bytes)", node->getSize());
break;
}
case kHLNodeTypeComment: {
case HLNode::kTypeComment: {
HLComment* node = static_cast<HLComment*>(node_);
sb.appendFormat("; %s", node->getComment());
break;
}
case kHLNodeTypeHint: {
case HLNode::kTypeHint: {
HLHint* node = static_cast<HLHint*>(node_);
static const char* hint[16] = {
"alloc",
@@ -195,7 +194,7 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr
break;
}
case kHLNodeTypeLabel: {
case HLNode::kTypeLabel: {
HLLabel* node = static_cast<HLLabel*>(node_);
sb.appendFormat("L%u: (NumRefs=%u)",
node->getLabelId(),
@@ -203,38 +202,38 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr
break;
}
case kHLNodeTypeInst: {
case HLNode::kTypeInst: {
HLInst* node = static_cast<HLInst*>(node_);
X86Context_annotateInstruction(self, sb,
node->getInstId(), node->getOpList(), node->getOpCount());
break;
}
case kHLNodeTypeFunc: {
case HLNode::kTypeFunc: {
HLFunc* node = static_cast<HLFunc*>(node_);
sb.appendFormat("[func]");
break;
}
case kHLNodeTypeSentinel: {
case HLNode::kTypeSentinel: {
HLSentinel* node = static_cast<HLSentinel*>(node_);
sb.appendFormat("[end]");
break;
}
case kHLNodeTypeRet: {
case HLNode::kTypeRet: {
HLRet* node = static_cast<HLRet*>(node_);
sb.appendFormat("[ret]");
break;
}
case kHLNodeTypeCall: {
case HLNode::kTypeCall: {
HLCall* node = static_cast<HLCall*>(node_);
sb.appendFormat("[call]");
break;
}
case kHLNodeTypeCallArg: {
case HLNode::kTypeCallArg: {
HLCallArg* node = static_cast<HLCallArg*>(node_);
sb.appendFormat("[sarg]");
break;
@@ -2006,14 +2005,14 @@ static ASMJIT_INLINE Error X86Context_insertHLCallArg(
sArgCount++;
}
const X86VarInfo& sInfo = _x86VarInfo[sType];
uint32_t sClass = sInfo.getClass();
const VarInfo& sInfo = _x86VarInfo[sType];
uint32_t sClass = sInfo.getRegClass();
if (X86Context_mustConvertSArg(self, aType, sType)) {
uint32_t cType = X86Context_typeOfConvertedSArg(self, aType, sType);
const X86VarInfo& cInfo = _x86VarInfo[cType];
uint32_t cClass = cInfo.getClass();
const VarInfo& cInfo = _x86VarInfo[cType];
uint32_t cClass = cInfo.getRegClass();
while (++i < sArgCount) {
sArgData = &sArgList[i];
@@ -2027,7 +2026,7 @@ static ASMJIT_INLINE Error X86Context_insertHLCallArg(
return kErrorOk;
}
VarData* cVd = compiler->_newVd(cType, cInfo.getSize(), cInfo.getClass(), nullptr);
VarData* cVd = compiler->_newVd(cInfo, nullptr);
if (cVd == nullptr)
return kErrorNoHeapMemory;
@@ -2289,15 +2288,15 @@ _NextGroup:
// [Align/Embed]
// ----------------------------------------------------------------------
case kHLNodeTypeAlign:
case kHLNodeTypeData:
case HLNode::kTypeAlign:
case HLNode::kTypeData:
break;
// ----------------------------------------------------------------------
// [Hint]
// ----------------------------------------------------------------------
case kHLNodeTypeHint: {
case HLNode::kTypeHint: {
HLHint* node = static_cast<HLHint*>(node_);
VI_BEGIN();
@@ -2351,7 +2350,7 @@ _NextGroup:
compiler->removeNode(cur);
cur = static_cast<HLHint*>(node->getNext());
if (cur == nullptr || cur->getType() != kHLNodeTypeHint || cur->getHint() != kVarHintAlloc)
if (cur == nullptr || cur->getType() != HLNode::kTypeHint || cur->getHint() != kVarHintAlloc)
break;
}
@@ -2389,7 +2388,7 @@ _NextGroup:
// [Target]
// ----------------------------------------------------------------------
case kHLNodeTypeLabel: {
case HLNode::kTypeLabel: {
if (node_ == func->getExitNode()) {
ASMJIT_PROPAGATE_ERROR(addReturningNode(node_));
goto _NextGroup;
@@ -2401,7 +2400,7 @@ _NextGroup:
// [Inst]
// ----------------------------------------------------------------------
case kHLNodeTypeInst: {
case HLNode::kTypeInst: {
HLInst* node = static_cast<HLInst*>(node_);
uint32_t instId = node->getInstId();
@@ -2417,10 +2416,10 @@ _NextGroup:
// Collect instruction flags and merge all 'VarAttr's.
if (extendedInfo.isFp())
flags |= kHLNodeFlagIsFp;
flags |= HLNode::kFlagIsFp;
if (extendedInfo.isSpecial() && (special = X86SpecialInst_get(instId, opList, opCount)) != nullptr)
flags |= kHLNodeFlagIsSpecial;
flags |= HLNode::kFlagIsSpecial;
uint32_t gpAllowedMask = 0xFFFFFFFF;
@@ -2656,11 +2655,11 @@ _NextGroup:
if (jTarget->isFetched()) {
uint32_t jTargetFlowId = jTarget->getFlowId();
// Update kHLNodeFlagIsTaken flag to true if this is a conditional
// backward jump. This behavior can be overridden by using
// `kInstOptionTaken` when the instruction is created.
// Update HLNode::kFlagIsTaken flag to true if this is a
// conditional backward jump. This behavior can be overridden
// by using `kInstOptionTaken` when the instruction is created.
if (!jNode->isTaken() && opCount == 1 && jTargetFlowId <= flowId) {
jNode->orFlags(kHLNodeFlagIsTaken);
jNode->orFlags(HLNode::kFlagIsTaken);
}
}
else if (next->isFetched()) {
@@ -2681,7 +2680,7 @@ _NextGroup:
// [Func]
// ----------------------------------------------------------------------
case kHLNodeTypeFunc: {
case HLNode::kTypeFunc: {
ASMJIT_ASSERT(node_ == func);
X86FuncDecl* decl = func->getDecl();
@@ -2732,7 +2731,7 @@ _NextGroup:
// [End]
// ----------------------------------------------------------------------
case kHLNodeTypeSentinel: {
case HLNode::kTypeSentinel: {
ASMJIT_PROPAGATE_ERROR(addReturningNode(node_));
goto _NextGroup;
}
@@ -2741,7 +2740,7 @@ _NextGroup:
// [Ret]
// ----------------------------------------------------------------------
case kHLNodeTypeRet: {
case HLNode::kTypeRet: {
HLRet* node = static_cast<HLRet*>(node_);
ASMJIT_PROPAGATE_ERROR(addReturningNode(node));
@@ -2788,7 +2787,7 @@ _NextGroup:
// [Call]
// ----------------------------------------------------------------------
case kHLNodeTypeCall: {
case HLNode::kTypeCall: {
X86CallNode* node = static_cast<X86CallNode*>(node_);
X86FuncDecl* decl = node->getDecl();
@@ -2962,7 +2961,7 @@ Error X86Context::annotate() {
uint32_t maxLen = 0;
while (node_ != end) {
if (node_->getComment() == nullptr) {
if (node_->getType() == kHLNodeTypeInst) {
if (node_->getType() == HLNode::kTypeInst) {
HLInst* node = static_cast<HLInst*>(node_);
X86Context_annotateInstruction(this, sb, node->getInstId(), node->getOpList(), node->getOpCount());
@@ -3277,11 +3276,11 @@ ASMJIT_INLINE Error X86VarAlloc::run(HLNode* node_) {
alloc<kX86RegClassXyz>();
// Translate node operands.
if (node_->getType() == kHLNodeTypeInst) {
if (node_->getType() == HLNode::kTypeInst) {
HLInst* node = static_cast<HLInst*>(node_);
ASMJIT_PROPAGATE_ERROR(X86Context_translateOperands(_context, node->getOpList(), node->getOpCount()));
}
else if (node_->getType() == kHLNodeTypeCallArg) {
else if (node_->getType() == HLNode::kTypeCallArg) {
HLCallArg* node = static_cast<HLCallArg*>(node_);
X86CallNode* call = static_cast<X86CallNode*>(node->getCall());
@@ -3895,11 +3894,11 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe
_Advance:
// Terminate if this is a return node.
if (node->hasFlag(kHLNodeFlagIsRet))
if (node->hasFlag(HLNode::kFlagIsRet))
goto _Done;
// Advance on non-conditional jump.
if (node->hasFlag(kHLNodeFlagIsJmp)) {
if (node->hasFlag(HLNode::kFlagIsJmp)) {
// Stop on a jump that is not followed.
node = static_cast<HLJump*>(node)->getTarget();
if (node == nullptr)
@@ -3908,7 +3907,7 @@ _Advance:
}
// Split flow on a conditional jump.
if (node->hasFlag(kHLNodeFlagIsJcc)) {
if (node->hasFlag(HLNode::kFlagIsJcc)) {
// Put the next node on the stack and follow the target if possible.
HLNode* next = node->getNext();
if (next != nullptr && gfIndex < kMaxGuessFlow)
@@ -3972,15 +3971,15 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe
break;
// Stop on `HLSentinel` and `HLRet`.
if (node->hasFlag(kHLNodeFlagIsRet))
if (node->hasFlag(HLNode::kFlagIsRet))
break;
// Stop on conditional jump, we don't follow them.
if (node->hasFlag(kHLNodeFlagIsJcc))
if (node->hasFlag(HLNode::kFlagIsJcc))
break;
// Advance on non-conditional jump.
if (node->hasFlag(kHLNodeFlagIsJmp)) {
if (node->hasFlag(HLNode::kFlagIsJmp)) {
node = static_cast<HLJump*>(node)->getTarget();
// Stop on jump that is not followed.
if (node == nullptr)
@@ -4580,15 +4579,15 @@ ASMJIT_INLINE uint32_t X86CallAlloc::guessAlloc(VarData* vd, uint32_t allocableR
HLNode* node = _node;
for (i = 0; i < maxLookAhead; i++) {
// Stop on 'HLRet' and 'HLSentinel.
if (node->hasFlag(kHLNodeFlagIsRet))
if (node->hasFlag(HLNode::kFlagIsRet))
break;
// Stop on conditional jump, we don't follow them.
if (node->hasFlag(kHLNodeFlagIsJcc))
if (node->hasFlag(HLNode::kFlagIsJcc))
break;
// Advance on non-conditional jump.
if (node->hasFlag(kHLNodeFlagIsJmp)) {
if (node->hasFlag(HLNode::kFlagIsJmp)) {
node = static_cast<HLJump*>(node)->getTarget();
// Stop on jump that is not followed.
if (node == nullptr)
@@ -4707,7 +4706,7 @@ ASMJIT_INLINE void X86CallAlloc::ret() {
continue;
VarData* vd = _compiler->getVdById(op->getId());
uint32_t vf = _x86VarInfo[vd->getType()].getDesc();
uint32_t vf = _x86VarInfo[vd->getType()].getFlags();
uint32_t regIndex = ret.getRegIndex();
switch (vd->getClass()) {
@@ -4729,8 +4728,8 @@ ASMJIT_INLINE void X86CallAlloc::ret() {
if (ret.getVarType() == kVarTypeFp32 || ret.getVarType() == kVarTypeFp64) {
X86Mem m = _context->getVarMem(vd);
m.setSize(
(vf & kVarFlagSp) ? 4 :
(vf & kVarFlagDp) ? 8 :
(vf & VarInfo::kFlagSP) ? 4 :
(vf & VarInfo::kFlagDP) ? 8 :
(ret.getVarType() == kVarTypeFp32) ? 4 : 8);
_context->unuse<kX86RegClassXyz>(vd, kVarStateMem);
@@ -4765,7 +4764,7 @@ static Error X86Context_translateOperands(X86Context* self, Operand* opList, uin
ASMJIT_ASSERT(vd != nullptr);
ASMJIT_ASSERT(vd->getRegIndex() != kInvalidReg);
op->_vreg.op = kOperandTypeReg;
op->_vreg.op = Operand::kTypeReg;
op->_vreg.index = vd->getRegIndex();
}
else if (op->isMem()) {
@@ -5015,7 +5014,7 @@ static Error X86Context_patchFuncMem(X86Context* self, X86FuncNode* func, HLNode
HLNode* node = func;
do {
if (node->getType() == kHLNodeTypeInst) {
if (node->getType() == HLNode::kTypeInst) {
HLInst* iNode = static_cast<HLInst*>(node);
if (iNode->hasMemOp()) {
@@ -5336,10 +5335,10 @@ static Error X86Context_translateRet(X86Context* self, HLRet* rNode, HLLabel* ex
VarData* vd = va.getVd();
X86Mem m(self->getVarMem(vd));
uint32_t flags = _x86VarInfo[vd->getType()].getDesc();
uint32_t flags = _x86VarInfo[vd->getType()].getFlags();
m.setSize(
(flags & kVarFlagSp) ? 4 :
(flags & kVarFlagDp) ? 8 :
(flags & VarInfo::kFlagSP) ? 4 :
(flags & VarInfo::kFlagDP) ? 8 :
va.hasFlag(kVarAttrX86Fld4) ? 4 : 8);
compiler->fld(m);
@@ -5352,29 +5351,29 @@ static Error X86Context_translateRet(X86Context* self, HLRet* rNode, HLLabel* ex
switch (node->getType()) {
// If we have found an exit label we just return, there is no need to
// emit jump to that.
case kHLNodeTypeLabel:
case HLNode::kTypeLabel:
if (static_cast<HLLabel*>(node) == exitTarget)
return kErrorOk;
goto _EmitRet;
case kHLNodeTypeData:
case kHLNodeTypeInst:
case kHLNodeTypeCall:
case kHLNodeTypeRet:
case HLNode::kTypeData:
case HLNode::kTypeInst:
case HLNode::kTypeCall:
case HLNode::kTypeRet:
goto _EmitRet;
// Continue iterating.
case kHLNodeTypeComment:
case kHLNodeTypeAlign:
case kHLNodeTypeHint:
case HLNode::kTypeComment:
case HLNode::kTypeAlign:
case HLNode::kTypeHint:
break;
// Invalid node to be here.
case kHLNodeTypeFunc:
case HLNode::kTypeFunc:
return self->getCompiler()->setLastError(kErrorInvalidState);
// We can't go forward from here.
case kHLNodeTypeSentinel:
case HLNode::kTypeSentinel:
return kErrorOk;
}
@@ -5413,7 +5412,7 @@ Error X86Context::translate() {
for (;;) {
while (node_->isTranslated()) {
// Switch state if we went to the already translated node.
if (node_->getType() == kHLNodeTypeLabel) {
if (node_->getType() == HLNode::kTypeLabel) {
HLLabel* node = static_cast<HLLabel*>(node_);
compiler->_setCursor(node->getPrev());
switchState(node->getState());
@@ -5448,7 +5447,7 @@ _NextGroup:
}
next = node_->getNext();
node_->orFlags(kHLNodeFlagIsTranslated);
node_->orFlags(HLNode::kFlagIsTranslated);
ASMJIT_TSEC({
X86Context_traceNode(this, node_, "[T] ");
@@ -5459,15 +5458,15 @@ _NextGroup:
// [Align / Embed]
// ----------------------------------------------------------------------
case kHLNodeTypeAlign:
case kHLNodeTypeData:
case HLNode::kTypeAlign:
case HLNode::kTypeData:
break;
// ----------------------------------------------------------------------
// [Target]
// ----------------------------------------------------------------------
case kHLNodeTypeLabel: {
case HLNode::kTypeLabel: {
HLLabel* node = static_cast<HLLabel*>(node_);
ASMJIT_ASSERT(!node->hasState());
node->setState(saveState());
@@ -5478,9 +5477,9 @@ _NextGroup:
// [Inst/Call/SArg/Ret]
// ----------------------------------------------------------------------
case kHLNodeTypeInst:
case kHLNodeTypeCall:
case kHLNodeTypeCallArg:
case HLNode::kTypeInst:
case HLNode::kTypeCall:
case HLNode::kTypeCallArg:
// Update VarAttr's unuse flags based on liveness of the next node.
if (!node_->isJcc()) {
X86VarMap* map = static_cast<X86VarMap*>(node_->getMap());
@@ -5500,14 +5499,14 @@ _NextGroup:
}
}
if (node_->getType() == kHLNodeTypeCall) {
if (node_->getType() == HLNode::kTypeCall) {
ASMJIT_PROPAGATE_ERROR(cAlloc.run(static_cast<X86CallNode*>(node_)));
break;
}
ASMJIT_FALLTHROUGH;
case kHLNodeTypeHint:
case kHLNodeTypeRet: {
case HLNode::kTypeHint:
case HLNode::kTypeRet: {
ASMJIT_PROPAGATE_ERROR(vAlloc.run(node_));
// Handle conditional/unconditional jump.
@@ -5539,7 +5538,7 @@ _NextGroup:
if (jTarget->isTranslated()) {
if (jNext->isTranslated()) {
ASMJIT_ASSERT(jNext->getType() == kHLNodeTypeLabel);
ASMJIT_ASSERT(jNext->getType() == HLNode::kTypeLabel);
compiler->_setCursor(node->getPrev());
intersectStates(jTarget->getState(), jNext->getState());
}
@@ -5551,7 +5550,7 @@ _NextGroup:
next = jNext;
}
else if (jNext->isTranslated()) {
ASMJIT_ASSERT(jNext->getType() == kHLNodeTypeLabel);
ASMJIT_ASSERT(jNext->getType() == HLNode::kTypeLabel);
VarState* savedState = saveState();
node->setState(savedState);
@@ -5577,7 +5576,7 @@ _NextGroup:
// [Func]
// ----------------------------------------------------------------------
case kHLNodeTypeFunc: {
case HLNode::kTypeFunc: {
ASMJIT_ASSERT(node_ == func);
X86FuncDecl* decl = func->getDecl();
@@ -5626,7 +5625,7 @@ _NextGroup:
// [End]
// ----------------------------------------------------------------------
case kHLNodeTypeSentinel: {
case HLNode::kTypeSentinel: {
goto _NextGroup;
}
@@ -5648,115 +5647,6 @@ _Done:
return kErrorOk;
}
// ============================================================================
// [asmjit::X86Context - Schedule]
// ============================================================================
Error X86Context::schedule() {
X86Compiler* compiler = getCompiler();
X86Scheduler scheduler(compiler,
static_cast<const X86CpuInfo*>(compiler->getRuntime()->getCpuInfo()));
HLNode* node_ = getFunc();
HLNode* stop = getStop();
ASMJIT_UNUSED(stop); // Unused in release mode.
PodList<HLNode*>::Link* jLink = _jccList.getFirst();
// --------------------------------------------------------------------------
// [Loop]
// --------------------------------------------------------------------------
_Advance:
while (node_->isScheduled()) {
_NextGroup:
if (jLink == nullptr)
goto _Done;
// We always go to the next instruction in the main loop so we have to
// jump to the `jcc` target here.
node_ = static_cast<HLJump*>(jLink->getValue())->getTarget();
jLink = jLink->getNext();
}
// Find interval that can be passed to scheduler.
for (;;) {
HLNode* schedStart = node_;
for (;;) {
HLNode* next = node_->getNext();
node_->orFlags(kHLNodeFlagIsScheduled);
// Shouldn't happen here, investigate if hit.
ASMJIT_ASSERT(node_ != stop);
uint32_t nodeType = node_->getType();
if (nodeType != kHLNodeTypeInst) {
// If we didn't reach any instruction node we simply advance. In this
// case no informative nodes will be removed and everything else just
// skipped.
if (schedStart == node_) {
node_ = next;
if (nodeType == kHLNodeTypeSentinel || nodeType == kHLNodeTypeRet)
goto _NextGroup;
else
goto _Advance;
}
// Remove informative nodes if we are in a middle of instruction stream.
//
// TODO: Shouldn't be there an option for this? Maybe it can be useful
// to stop if there is a comment or something. I'm not sure if it's
// good to always remove.
if (node_->isInformative()) {
compiler->removeNode(node_);
node_ = next;
continue;
}
break;
}
// Stop if `node_` is `jmp` or `jcc`.
if (node_->isJmpOrJcc())
break;
node_ = next;
}
// If the stream is less than 3 instructions it will not be passed to
// scheduler.
if (schedStart != node_ &&
schedStart->getNext() != node_ &&
schedStart->getNext() != node_->getPrev()) {
scheduler.run(schedStart, node_);
}
// If node is `jmp` we follow it as well.
if (node_->isJmp()) {
node_ = static_cast<HLJump*>(node_)->getTarget();
if (node_ == nullptr)
goto _NextGroup;
else
goto _Advance;
}
// Handle stop nodes.
{
uint32_t nodeType = node_->getType();
if (nodeType == kHLNodeTypeSentinel || nodeType == kHLNodeTypeRet)
goto _NextGroup;
}
node_ = node_->getNext();
goto _Advance;
}
_Done:
return kErrorOk;
}
// ============================================================================
// [asmjit::X86Context - Serialize]
// ============================================================================
@@ -5779,39 +5669,39 @@ Error X86Context::serialize(Assembler* assembler_, HLNode* start, HLNode* stop)
#endif // !ASMJIT_DISABLE_LOGGER
switch (node_->getType()) {
case kHLNodeTypeAlign: {
case HLNode::kTypeAlign: {
HLAlign* node = static_cast<HLAlign*>(node_);
assembler->align(node->getAlignMode(), node->getOffset());
break;
}
case kHLNodeTypeData: {
case HLNode::kTypeData: {
HLData* node = static_cast<HLData*>(node_);
assembler->embed(node->getData(), node->getSize());
break;
}
case kHLNodeTypeComment: {
case HLNode::kTypeComment: {
#if !defined(ASMJIT_DISABLE_LOGGER)
HLComment* node = static_cast<HLComment*>(node_);
if (logger)
logger->logFormat(kLoggerStyleComment,
logger->logFormat(Logger::kStyleComment,
"%s; %s\n", logger->getIndentation(), node->getComment());
#endif // !ASMJIT_DISABLE_LOGGER
break;
}
case kHLNodeTypeHint: {
case HLNode::kTypeHint: {
break;
}
case kHLNodeTypeLabel: {
case HLNode::kTypeLabel: {
HLLabel* node = static_cast<HLLabel*>(node_);
assembler->bind(node->getLabel());
break;
}
case kHLNodeTypeInst: {
case HLNode::kTypeInst: {
HLInst* node = static_cast<HLInst*>(node_);
uint32_t instId = node->getInstId();
@@ -5970,15 +5860,15 @@ Error X86Context::serialize(Assembler* assembler_, HLNode* start, HLNode* stop)
// Function scope and return is translated to another nodes, no special
// handling is required at this point.
case kHLNodeTypeFunc:
case kHLNodeTypeSentinel:
case kHLNodeTypeRet: {
case HLNode::kTypeFunc:
case HLNode::kTypeSentinel:
case HLNode::kTypeRet: {
break;
}
// Function call adds nodes before and after, but it's required to emit
// the call instruction by itself.
case kHLNodeTypeCall: {
case HLNode::kTypeCall: {
X86CallNode* node = static_cast<X86CallNode*>(node_);
assembler->emit(kX86InstIdCall, node->_target, noOperand, noOperand);
break;

View File

@@ -662,12 +662,6 @@ struct X86Context : public Context {
virtual Error translate();
// --------------------------------------------------------------------------
// [Schedule]
// --------------------------------------------------------------------------
virtual Error schedule();
// --------------------------------------------------------------------------
// [Serialize]
// --------------------------------------------------------------------------

View File

@@ -1,401 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
// [Dependencies - AsmJit]
#include "../base/utils.h"
#include "../x86/x86cpuinfo.h"
// 2009-02-05: Thanks to Mike Tajmajer for VC7.1 compiler support. It shouldn't
// affect x64 compilation, because x64 compiler starts with VS2005 (VC8.0).
#if defined(_MSC_VER) && (_MSC_VER >= 1400)
#include <intrin.h>
#endif // _MSC_VER >= 1400
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86CpuVendor]
// ============================================================================
struct X86CpuVendor {
uint32_t id;
char text[12];
};
static const X86CpuVendor x86CpuVendorList[] = {
{ kCpuVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } },
{ kCpuVendorAMD , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } },
{ kCpuVendorVIA , { 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 } },
{ kCpuVendorVIA , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } }
};
static ASMJIT_INLINE bool x86CpuVendorEq(const X86CpuVendor& info, const char* vendorString) {
const uint32_t* a = reinterpret_cast<const uint32_t*>(info.text);
const uint32_t* b = reinterpret_cast<const uint32_t*>(vendorString);
return (a[0] == b[0]) & (a[1] == b[1]) & (a[2] == b[2]);
}
static ASMJIT_INLINE void x86SimplifyBrandString(char* s) {
// Always clear the current character in the buffer. It ensures that there
// is no garbage after the string zero terminator.
char* d = s;
char prev = 0;
char curr = s[0];
s[0] = '\0';
for (;;) {
if (curr == 0)
break;
if (curr == ' ') {
if (prev == '@' || s[1] == ' ' || s[1] == '@')
goto _Skip;
}
d[0] = curr;
d++;
prev = curr;
_Skip:
curr = *++s;
s[0] = '\0';
}
d[0] = '\0';
}
// ============================================================================
// [asmjit::X86CpuUtil]
// ============================================================================
// This is messy, I know. Cpuid is implemented as intrinsic in VS2005, but
// we should support other compilers as well. Main problem is that MS compilers
// in 64-bit mode not allows to use inline assembler, so we need intrinsic and
// we need also asm version.
union X86XCR {
uint64_t value;
struct {
uint32_t eax;
uint32_t edx;
};
};
// callCpuId() and detectCpuInfo() for x86 and x64 platforms begins here.
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
void X86CpuUtil::_docpuid(uint32_t inEcx, uint32_t inEax, X86CpuId* result) {
#if defined(_MSC_VER)
// __cpuidex was introduced by VS2008-SP1.
# if _MSC_FULL_VER >= 150030729
__cpuidex(reinterpret_cast<int*>(result->i), inEax, inEcx);
# elif ASMJIT_ARCH_X64
// VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However, 64-bit
// calling convention specifies parameter to be passed in ECX/RCX, so we may
// be lucky if compiler doesn't move the register, otherwise the result is
// undefined.
__cpuid(reinterpret_cast<int*>(result->i), inEax);
# else
uint32_t cpuid_eax = inEax;
uint32_t cpuid_ecx = inEcx;
uint32_t* cpuid_out = result->i;
__asm {
mov eax, cpuid_eax
mov ecx, cpuid_ecx
mov edi, cpuid_out
cpuid
mov dword ptr[edi + 0], eax
mov dword ptr[edi + 4], ebx
mov dword ptr[edi + 8], ecx
mov dword ptr[edi + 12], edx
}
# endif
#elif defined(__GNUC__)
// Note, patched to preserve ebx/rbx register which is used by GCC.
# if ASMJIT_ARCH_X86
# define __myCpuId(inEax, inEcx, outEax, outEbx, outEcx, outEdx) \
__asm__ __volatile__( \
"mov %%ebx, %%edi\n" \
"cpuid\n" \
"xchg %%edi, %%ebx\n" \
: "=a" (outEax), "=D" (outEbx), "=c" (outEcx), "=d" (outEdx) \
: "a" (inEax), "c" (inEcx))
# else
# define __myCpuId(inEax, inEcx, outEax, outEbx, outEcx, outEdx) \
__asm__ __volatile__( \
"mov %%rbx, %%rdi\n" \
"cpuid\n" \
"xchg %%rdi, %%rbx\n" \
: "=a" (outEax), "=D" (outEbx), "=c" (outEcx), "=d" (outEdx) \
: "a" (inEax), "c" (inEcx))
# endif
__myCpuId(inEax, inEcx, result->eax, result->ebx, result->ecx, result->edx);
#else
# error "[asmjit] X86CpuUtil::_docpuid() unimplemented!"
#endif
}
static void callXGetBV(X86XCR* result, uint32_t inEcx) {
#if defined(_MSC_VER)
# if (_MSC_FULL_VER >= 160040219) // 2010SP1+
result->value = _xgetbv(inEcx);
# else
result->value = 0;
# endif
#elif defined(__GNUC__)
unsigned int eax, edx;
// Removed, because the world is not perfect:
// __asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(inEcx));
__asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(inEcx));
result->eax = eax;
result->edx = edx;
#else
result->value = 0;
#endif // COMPILER
}
void X86CpuUtil::detect(X86CpuInfo* cpuInfo) {
uint32_t i;
uint32_t maxBaseId;
X86CpuId regs;
X86XCR xcr0;
xcr0.value = 0;
// Clear everything except the '_size' member.
::memset(reinterpret_cast<uint8_t*>(cpuInfo) + sizeof(uint32_t),
0, sizeof(CpuInfo) - sizeof(uint32_t));
// Fill safe defaults.
cpuInfo->_hwThreadsCount = CpuInfo::detectHwThreadsCount();
// --------------------------------------------------------------------------
// [CPUID EAX=0x0]
// --------------------------------------------------------------------------
// Get vendor string/id.
callCpuId(&regs, 0x0);
maxBaseId = regs.eax;
::memcpy(cpuInfo->_vendorString, &regs.ebx, 4);
::memcpy(cpuInfo->_vendorString + 4, &regs.edx, 4);
::memcpy(cpuInfo->_vendorString + 8, &regs.ecx, 4);
for (i = 0; i < ASMJIT_ARRAY_SIZE(x86CpuVendorList); i++) {
if (x86CpuVendorEq(x86CpuVendorList[i], cpuInfo->_vendorString)) {
cpuInfo->_vendorId = x86CpuVendorList[i].id;
break;
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0x1]
// --------------------------------------------------------------------------
if (maxBaseId >= 0x1) {
// Get feature flags in ECX/EDX and family/model in EAX.
callCpuId(&regs, 0x1);
// Fill family and model fields.
cpuInfo->_family = (regs.eax >> 8) & 0x0F;
cpuInfo->_model = (regs.eax >> 4) & 0x0F;
cpuInfo->_stepping = (regs.eax ) & 0x0F;
// Use extended family and model fields.
if (cpuInfo->_family == 0x0F) {
cpuInfo->_family += ((regs.eax >> 20) & 0xFF);
cpuInfo->_model += ((regs.eax >> 16) & 0x0F) << 4;
}
cpuInfo->_processorType = ((regs.eax >> 12) & 0x03);
cpuInfo->_brandIndex = ((regs.ebx ) & 0xFF);
cpuInfo->_flushCacheLineSize = ((regs.ebx >> 8) & 0xFF) * 8;
cpuInfo->_maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF);
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureSSE3);
if (regs.ecx & 0x00000002U) cpuInfo->addFeature(kX86CpuFeaturePCLMULQDQ);
if (regs.ecx & 0x00000008U) cpuInfo->addFeature(kX86CpuFeatureMONITOR);
if (regs.ecx & 0x00000200U) cpuInfo->addFeature(kX86CpuFeatureSSSE3);
if (regs.ecx & 0x00002000U) cpuInfo->addFeature(kX86CpuFeatureCMPXCHG16B);
if (regs.ecx & 0x00080000U) cpuInfo->addFeature(kX86CpuFeatureSSE4_1);
if (regs.ecx & 0x00100000U) cpuInfo->addFeature(kX86CpuFeatureSSE4_2);
if (regs.ecx & 0x00400000U) cpuInfo->addFeature(kX86CpuFeatureMOVBE);
if (regs.ecx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeaturePOPCNT);
if (regs.ecx & 0x02000000U) cpuInfo->addFeature(kX86CpuFeatureAESNI);
if (regs.ecx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureXSAVE);
if (regs.ecx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureXSAVE_OS);
if (regs.ecx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeatureRDRAND);
if (regs.edx & 0x00000010U) cpuInfo->addFeature(kX86CpuFeatureRDTSC);
if (regs.edx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeatureCMPXCHG8B);
if (regs.edx & 0x00008000U) cpuInfo->addFeature(kX86CpuFeatureCMOV);
if (regs.edx & 0x00080000U) cpuInfo->addFeature(kX86CpuFeatureCLFLUSH);
if (regs.edx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeatureMMX);
if (regs.edx & 0x01000000U) cpuInfo->addFeature(kX86CpuFeatureFXSR);
if (regs.edx & 0x02000000U) cpuInfo->addFeature(kX86CpuFeatureSSE).addFeature(kX86CpuFeatureMMX2);
if (regs.edx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureSSE).addFeature(kX86CpuFeatureSSE2);
if (regs.edx & 0x10000000U) cpuInfo->addFeature(kX86CpuFeatureMT);
// AMD sets Multithreading to ON if it has two or more cores.
if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == kCpuVendorAMD && (regs.edx & 0x10000000U)) {
cpuInfo->_hwThreadsCount = 2;
}
// Get the content of XCR0 if supported by CPU and enabled by OS.
if ((regs.ecx & 0x0C000000U) == 0x0C000000U) {
callXGetBV(&xcr0, 0);
}
// Detect AVX+.
if (regs.ecx & 0x10000000U) {
// - XCR0[2:1] == 11b
// XMM & YMM states are enabled by OS.
if ((xcr0.eax & 0x00000006U) == 0x00000006U) {
cpuInfo->addFeature(kX86CpuFeatureAVX);
if (regs.ecx & 0x00000800U) cpuInfo->addFeature(kX86CpuFeatureXOP);
if (regs.ecx & 0x00004000U) cpuInfo->addFeature(kX86CpuFeatureFMA3);
if (regs.ecx & 0x00010000U) cpuInfo->addFeature(kX86CpuFeatureFMA4);
if (regs.ecx & 0x20000000U) cpuInfo->addFeature(kX86CpuFeatureF16C);
}
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0x7 ECX=0x0]
// --------------------------------------------------------------------------
// Detect new features if the processor supports CPUID-07.
bool maybeMPX = false;
if (maxBaseId >= 0x7) {
callCpuId(&regs, 0x7);
if (regs.ebx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureFSGSBASE);
if (regs.ebx & 0x00000008U) cpuInfo->addFeature(kX86CpuFeatureBMI);
if (regs.ebx & 0x00000010U) cpuInfo->addFeature(kX86CpuFeatureHLE);
if (regs.ebx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeatureBMI2);
if (regs.ebx & 0x00000200U) cpuInfo->addFeature(kX86CpuFeatureMOVSBSTOSB_OPT);
if (regs.ebx & 0x00000800U) cpuInfo->addFeature(kX86CpuFeatureRTM);
if (regs.ebx & 0x00004000U) maybeMPX = true;
if (regs.ebx & 0x00040000U) cpuInfo->addFeature(kX86CpuFeatureRDSEED);
if (regs.ebx & 0x00080000U) cpuInfo->addFeature(kX86CpuFeatureADX);
if (regs.ebx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeatureCLFLUSH_OPT);
if (regs.ebx & 0x20000000U) cpuInfo->addFeature(kX86CpuFeatureSHA);
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeaturePREFETCHWT1);
// Detect AVX2.
if (cpuInfo->hasFeature(kX86CpuFeatureAVX)) {
if (regs.ebx & 0x00000020U) cpuInfo->addFeature(kX86CpuFeatureAVX2);
}
// Detect AVX-512+.
if (regs.ebx & 0x00010000U) {
// - XCR0[2:1] == 11b
// XMM & YMM states are enabled by OS.
// - XCR0[7:5] == 111b
// Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 state are enabled by OS.
if ((xcr0.eax & 0x00000076U) == 0x00000076U) {
cpuInfo->addFeature(kX86CpuFeatureAVX512F);
if (regs.ebx & 0x00020000U) cpuInfo->addFeature(kX86CpuFeatureAVX512DQ);
if (regs.ebx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512PF);
if (regs.ebx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512ER);
if (regs.ebx & 0x10000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512CD);
if (regs.ebx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512BW);
if (regs.ebx & 0x80000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512VL);
}
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0xD, ECX=0x0]
// --------------------------------------------------------------------------
if (maxBaseId >= 0xD && maybeMPX) {
callCpuId(&regs, 0xD);
// Both CPUID result and XCR0 has to be enabled to have support for MPX.
if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U) {
cpuInfo->addFeature(kX86CpuFeatureMPX);
}
}
// --------------------------------------------------------------------------
// [CPUID EAX=0x80000000]
// --------------------------------------------------------------------------
// Calling cpuid with 0x80000000 as the in argument gets the number of valid
// extended IDs.
callCpuId(&regs, 0x80000000);
uint32_t maxExtId = Utils::iMin<uint32_t>(regs.eax, 0x80000004);
uint32_t* brand = reinterpret_cast<uint32_t*>(cpuInfo->_brandString);
for (i = 0x80000001; i <= maxExtId; i++) {
callCpuId(&regs, i);
switch (i) {
case 0x80000001:
if (regs.ecx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureLahfSahf);
if (regs.ecx & 0x00000020U) cpuInfo->addFeature(kX86CpuFeatureLZCNT);
if (regs.ecx & 0x00000040U) cpuInfo->addFeature(kX86CpuFeatureSSE4A);
if (regs.ecx & 0x00000080U) cpuInfo->addFeature(kX86CpuFeatureMSSE);
if (regs.ecx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeaturePREFETCH);
if (regs.edx & 0x00100000U) cpuInfo->addFeature(kX86CpuFeatureNX);
if (regs.edx & 0x00200000U) cpuInfo->addFeature(kX86CpuFeatureFXSR_OPT);
if (regs.edx & 0x00400000U) cpuInfo->addFeature(kX86CpuFeatureMMX2);
if (regs.edx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureRDTSCP);
if (regs.edx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeature3DNOW2).addFeature(kX86CpuFeatureMMX2);
if (regs.edx & 0x80000000U) cpuInfo->addFeature(kX86CpuFeature3DNOW);
break;
case 0x80000002:
case 0x80000003:
case 0x80000004:
*brand++ = regs.eax;
*brand++ = regs.ebx;
*brand++ = regs.ecx;
*brand++ = regs.edx;
break;
default:
// Additional features can be detected in the future.
break;
}
}
// Simplify the brand string (remove unnecessary spaces to make printing prettier).
x86SimplifyBrandString(cpuInfo->_brandString);
}
#endif
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64

View File

@@ -1,273 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86CPUINFO_H
#define _ASMJIT_X86_X86CPUINFO_H
// [Dependencies - AsmJit]
#include "../base/cpuinfo.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct X86CpuInfo;
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::X86CpuFeature]
// ============================================================================
//! X86 CPU features.
ASMJIT_ENUM(X86CpuFeature) {
//! Cpu has Not-Execute-Bit.
kX86CpuFeatureNX = 0,
//! Cpu has multithreading.
kX86CpuFeatureMT,
//! Cpu has RDTSC.
kX86CpuFeatureRDTSC,
//! Cpu has RDTSCP.
kX86CpuFeatureRDTSCP,
//! Cpu has CMOV.
kX86CpuFeatureCMOV,
//! Cpu has CMPXCHG8B.
kX86CpuFeatureCMPXCHG8B,
//! Cpu has CMPXCHG16B (X64).
kX86CpuFeatureCMPXCHG16B,
//! Cpu has CLFUSH.
kX86CpuFeatureCLFLUSH,
//! Cpu has CLFUSH (Optimized).
kX86CpuFeatureCLFLUSH_OPT,
//! Cpu has PREFETCH.
kX86CpuFeaturePREFETCH,
//! Cpu has PREFETCHWT1.
kX86CpuFeaturePREFETCHWT1,
//! Cpu has LAHF/SAHF.
kX86CpuFeatureLahfSahf,
//! Cpu has FXSAVE/FXRSTOR.
kX86CpuFeatureFXSR,
//! Cpu has FXSAVE/FXRSTOR (Optimized).
kX86CpuFeatureFXSR_OPT,
//! Cpu has MMX.
kX86CpuFeatureMMX,
//! Cpu has extended MMX.
kX86CpuFeatureMMX2,
//! Cpu has 3dNow!
kX86CpuFeature3DNOW,
//! Cpu has enchanced 3dNow!
kX86CpuFeature3DNOW2,
//! Cpu has SSE.
kX86CpuFeatureSSE,
//! Cpu has SSE2.
kX86CpuFeatureSSE2,
//! Cpu has SSE3.
kX86CpuFeatureSSE3,
//! Cpu has SSSE3.
kX86CpuFeatureSSSE3,
//! Cpu has SSE4.A.
kX86CpuFeatureSSE4A,
//! Cpu has SSE4.1.
kX86CpuFeatureSSE4_1,
//! Cpu has SSE4.2.
kX86CpuFeatureSSE4_2,
//! Cpu has Misaligned SSE (MSSE).
kX86CpuFeatureMSSE,
//! Cpu has MONITOR and MWAIT.
kX86CpuFeatureMONITOR,
//! Cpu has MOVBE.
kX86CpuFeatureMOVBE,
//! Cpu has POPCNT.
kX86CpuFeaturePOPCNT,
//! Cpu has LZCNT.
kX86CpuFeatureLZCNT,
//! Cpu has AESNI.
kX86CpuFeatureAESNI,
//! Cpu has PCLMULQDQ.
kX86CpuFeaturePCLMULQDQ,
//! Cpu has RDRAND.
kX86CpuFeatureRDRAND,
//! Cpu has RDSEED.
kX86CpuFeatureRDSEED,
//! Cpu has SHA-1 and SHA-256.
kX86CpuFeatureSHA,
//! Cpu has XSAVE support - XSAVE/XRSTOR, XSETBV/XGETBV, and XCR0.
kX86CpuFeatureXSAVE,
//! OS has enabled XSAVE, you can call XGETBV to get value of XCR0.
kX86CpuFeatureXSAVE_OS,
//! Cpu has AVX.
kX86CpuFeatureAVX,
//! Cpu has AVX2.
kX86CpuFeatureAVX2,
//! Cpu has F16C.
kX86CpuFeatureF16C,
//! Cpu has FMA3.
kX86CpuFeatureFMA3,
//! Cpu has FMA4.
kX86CpuFeatureFMA4,
//! Cpu has XOP.
kX86CpuFeatureXOP,
//! Cpu has BMI.
kX86CpuFeatureBMI,
//! Cpu has BMI2.
kX86CpuFeatureBMI2,
//! Cpu has HLE.
kX86CpuFeatureHLE,
//! Cpu has RTM.
kX86CpuFeatureRTM,
//! Cpu has ADX.
kX86CpuFeatureADX,
//! Cpu has MPX (Memory Protection Extensions).
kX86CpuFeatureMPX,
//! Cpu has FSGSBASE.
kX86CpuFeatureFSGSBASE,
//! Cpu has optimized REP MOVSB/STOSB.
kX86CpuFeatureMOVSBSTOSB_OPT,
//! Cpu has AVX-512F (Foundation).
kX86CpuFeatureAVX512F,
//! Cpu has AVX-512CD (Conflict Detection).
kX86CpuFeatureAVX512CD,
//! Cpu has AVX-512PF (Prefetch Instructions).
kX86CpuFeatureAVX512PF,
//! Cpu has AVX-512ER (Exponential and Reciprocal Instructions).
kX86CpuFeatureAVX512ER,
//! Cpu has AVX-512DQ (DWord/QWord).
kX86CpuFeatureAVX512DQ,
//! Cpu has AVX-512BW (Byte/Word).
kX86CpuFeatureAVX512BW,
//! Cpu has AVX VL (Vector Length Excensions).
kX86CpuFeatureAVX512VL,
//! Count of X86/X64 Cpu features.
kX86CpuFeatureCount
};
// ============================================================================
// [asmjit::X86CpuId]
// ============================================================================
//! X86/X64 CPUID output.
union X86CpuId {
//! EAX/EBX/ECX/EDX output.
uint32_t i[4];
struct {
//! EAX output.
uint32_t eax;
//! EBX output.
uint32_t ebx;
//! ECX output.
uint32_t ecx;
//! EDX output.
uint32_t edx;
};
};
// ============================================================================
// [asmjit::X86CpuUtil]
// ============================================================================
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
//! CPU utilities available only if the host processor is X86/X64.
struct X86CpuUtil {
//! \internal
//!
//! Designed to support VS2008 and less in 64-bit mode, even if this compiler
//! doesn't have `__cpuidex` intrinsic.
ASMJIT_API static void _docpuid(uint32_t inEcx, uint32_t inEax, X86CpuId* out);
//! Get the result of calling CPUID instruction to `out`.
static ASMJIT_INLINE void callCpuId(X86CpuId* out, uint32_t inEax, uint32_t inEcx = 0) {
return _docpuid(inEcx, inEax, out);
}
//! Detect the Host CPU.
ASMJIT_API static void detect(X86CpuInfo* cpuInfo);
};
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
// ============================================================================
// [asmjit::X86CpuInfo]
// ============================================================================
struct X86CpuInfo : public CpuInfo {
ASMJIT_NO_COPY(X86CpuInfo)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
ASMJIT_INLINE X86CpuInfo();
// --------------------------------------------------------------------------
// [Accessors]
// --------------------------------------------------------------------------
//! Get processor type.
ASMJIT_INLINE uint32_t getProcessorType() const {
return _processorType;
}
//! Get brand index.
ASMJIT_INLINE uint32_t getBrandIndex() const {
return _brandIndex;
}
//! Get flush cache line size.
ASMJIT_INLINE uint32_t getFlushCacheLineSize() const {
return _flushCacheLineSize;
}
//! Get maximum logical processors count.
ASMJIT_INLINE uint32_t getMaxLogicalProcessors() const {
return _maxLogicalProcessors;
}
// --------------------------------------------------------------------------
// [Statics]
// --------------------------------------------------------------------------
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
//! Get global instance of `X86CpuInfo`.
static ASMJIT_INLINE const X86CpuInfo* getHost() {
return static_cast<const X86CpuInfo*>(CpuInfo::getHost());
}
#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Processor type.
uint32_t _processorType;
//! Brand index.
uint32_t _brandIndex;
//! Flush cache line size in bytes.
uint32_t _flushCacheLineSize;
//! Maximum number of addressable IDs for logical processors.
uint32_t _maxLogicalProcessors;
};
ASMJIT_INLINE X86CpuInfo::X86CpuInfo() :
CpuInfo(sizeof(X86CpuInfo)) {}
//! \}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // _ASMJIT_X86_X86CPUINFO_H

File diff suppressed because it is too large Load Diff

View File

@@ -34,12 +34,12 @@ struct X86InstExtendedInfo;
// [asmjit::X86Inst/X86Cond - Globals]
// ============================================================================
#if !defined(ASMJIT_DISABLE_NAMES)
#if !defined(ASMJIT_DISABLE_TEXT)
//! \internal
//!
//! X86/X64 instructions' names, accessible through `X86InstInfo`.
ASMJIT_VARAPI const char _x86InstName[];
#endif // !ASMJIT_DISABLE_NAMES
#endif // !ASMJIT_DISABLE_TEXT
//! \internal
//!
@@ -2209,7 +2209,7 @@ struct X86InstInfo {
// [Accessors - Instruction Name]
// --------------------------------------------------------------------------
#if !defined(ASMJIT_DISABLE_NAMES)
#if !defined(ASMJIT_DISABLE_TEXT)
//! Get instruction name string (null terminated).
ASMJIT_INLINE const char* getInstName() const {
return _x86InstName + static_cast<uint32_t>(_nameIndex);
@@ -2219,7 +2219,7 @@ struct X86InstInfo {
ASMJIT_INLINE uint32_t _getNameIndex() const {
return _nameIndex;
}
#endif // !ASMJIT_DISABLE_NAMES
#endif // !ASMJIT_DISABLE_TEXT
// --------------------------------------------------------------------------
// [Accessors - Extended-Info]
@@ -2335,54 +2335,54 @@ struct X86Util {
//!
//! \note `instId` has to be valid instruction ID, it can't be greater than
//! or equal to `_kX86InstIdCount`. It asserts in debug mode.
static ASMJIT_INLINE const X86InstInfo& getInstInfo(uint32_t instId) {
static ASMJIT_INLINE const X86InstInfo& getInstInfo(uint32_t instId) noexcept {
ASMJIT_ASSERT(instId < _kX86InstIdCount);
return _x86InstInfo[instId];
}
#if !defined(ASMJIT_DISABLE_NAMES)
#if !defined(ASMJIT_DISABLE_TEXT)
//! Get an instruction ID from a given instruction `name`.
//!
//! If there is an exact match the instruction id is returned, otherwise
//! `kInstIdNone` (zero) is returned.
//!
//! The given `name` doesn't have to be null-terminated if `len` is provided.
ASMJIT_API static uint32_t getInstIdByName(const char* name, size_t len = kInvalidIndex);
#endif // !ASMJIT_DISABLE_NAMES
ASMJIT_API static uint32_t getInstIdByName(const char* name, size_t len = kInvalidIndex) noexcept;
#endif // !ASMJIT_DISABLE_TEXT
// --------------------------------------------------------------------------
// [Condition Codes]
// --------------------------------------------------------------------------
//! Corresponds to transposing the operands of a comparison.
static ASMJIT_INLINE uint32_t reverseCond(uint32_t cond) {
static ASMJIT_INLINE uint32_t reverseCond(uint32_t cond) noexcept {
ASMJIT_ASSERT(cond < ASMJIT_ARRAY_SIZE(_x86ReverseCond));
return _x86ReverseCond[cond];
}
//! Get the equivalent of negated condition code.
static ASMJIT_INLINE uint32_t negateCond(uint32_t cond) {
static ASMJIT_INLINE uint32_t negateCond(uint32_t cond) noexcept {
ASMJIT_ASSERT(cond < ASMJIT_ARRAY_SIZE(_x86ReverseCond));
return cond ^ static_cast<uint32_t>(cond < kX86CondNone);
}
//! Translate condition code `cc` to `cmovcc` instruction code.
//! \sa \ref X86InstId, \ref _kX86InstIdCmovcc.
static ASMJIT_INLINE uint32_t condToCmovcc(uint32_t cond) {
static ASMJIT_INLINE uint32_t condToCmovcc(uint32_t cond) noexcept {
ASMJIT_ASSERT(static_cast<uint32_t>(cond) < ASMJIT_ARRAY_SIZE(_x86CondToCmovcc));
return _x86CondToCmovcc[cond];
}
//! Translate condition code `cc` to `jcc` instruction code.
//! \sa \ref X86InstId, \ref _kX86InstIdJcc.
static ASMJIT_INLINE uint32_t condToJcc(uint32_t cond) {
static ASMJIT_INLINE uint32_t condToJcc(uint32_t cond) noexcept {
ASMJIT_ASSERT(static_cast<uint32_t>(cond) < ASMJIT_ARRAY_SIZE(_x86CondToJcc));
return _x86CondToJcc[cond];
}
//! Translate condition code `cc` to `setcc` instruction code.
//! \sa \ref X86InstId, \ref _kX86InstIdSetcc.
static ASMJIT_INLINE uint32_t condToSetcc(uint32_t cond) {
static ASMJIT_INLINE uint32_t condToSetcc(uint32_t cond) noexcept {
ASMJIT_ASSERT(static_cast<uint32_t>(cond) < ASMJIT_ARRAY_SIZE(_x86CondToSetcc));
return _x86CondToSetcc[cond];
}
@@ -2397,8 +2397,8 @@ struct X86Util {
//! \param b Position of the second component [0, 1], inclusive.
//!
//! Shuffle constants can be used to encode an immediate for these instructions:
//! - `X86Assembler::shufpd()` and `X86Compiler::shufpd()`
static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b) {
//! - `shufpd`
static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b) noexcept {
ASMJIT_ASSERT(a <= 0x1 && b <= 0x1);
uint32_t result = (a << 1) | b;
return static_cast<int>(result);
@@ -2412,12 +2412,12 @@ struct X86Util {
//! \param d Position of the fourth component [0, 3], inclusive.
//!
//! Shuffle constants can be used to encode an immediate for these instructions:
//! - `X86Assembler::pshufw()` and `X86Compiler::pshufw()`.
//! - `X86Assembler::pshufd()` and `X86Compiler::pshufd()`.
//! - `X86Assembler::pshufhw()` and `X86Compiler::pshufhw()`.
//! - `X86Assembler::pshuflw()` and `X86Compiler::pshuflw()`.
//! - `X86Assembler::shufps()` and `X86Compiler::shufps()`.
static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b, uint32_t c, uint32_t d) {
//! - `pshufw()`
//! - `pshufd()`
//! - `pshuflw()`
//! - `pshufhw()`
//! - `shufps()`
static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept {
ASMJIT_ASSERT(a <= 0x3 && b <= 0x3 && c <= 0x3 && d <= 0x3);
uint32_t result = (a << 6) | (b << 4) | (c << 2) | d;
return static_cast<int>(result);

View File

@@ -24,17 +24,17 @@ namespace x86 {
// [asmjit::X86Mem - abs[]]
// ============================================================================
X86Mem ptr_abs(Ptr pAbs, int32_t disp, uint32_t size) {
X86Mem ptr_abs(Ptr p, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
m._init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeAbsolute, 0, kInvalidValue);
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, 0, kInvalidValue);
m._vmem.index = kInvalidValue;
m._vmem.displacement = static_cast<int32_t>((intptr_t)(pAbs + disp));
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}
X86Mem ptr_abs(Ptr pAbs, const X86Reg& index, uint32_t shift, int32_t disp, uint32_t size) {
X86Mem ptr_abs(Ptr p, const X86Reg& index, uint32_t shift, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
uint32_t flags = shift << kX86MemShiftIndex;
@@ -45,15 +45,15 @@ X86Mem ptr_abs(Ptr pAbs, const X86Reg& index, uint32_t shift, int32_t disp, uint
else if (index.isYmm())
flags |= kX86MemVSibYmm << kX86MemVSibIndex;
m._init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._vmem.index = index.getRegIndex();
m._vmem.displacement = static_cast<int32_t>((intptr_t)(pAbs + disp));
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}
#if !defined(ASMJIT_DISABLE_COMPILER)
X86Mem ptr_abs(Ptr pAbs, const X86Var& index, uint32_t shift, int32_t disp, uint32_t size) {
X86Mem ptr_abs(Ptr p, const X86Var& index, uint32_t shift, int32_t disp, uint32_t size) noexcept {
X86Mem m(NoInit);
uint32_t flags = shift << kX86MemShiftIndex;
@@ -67,9 +67,9 @@ X86Mem ptr_abs(Ptr pAbs, const X86Var& index, uint32_t shift, int32_t disp, uint
else if (indexRegType == kX86RegTypeYmm)
flags |= kX86MemVSibYmm << kX86MemVSibIndex;
m._init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue);
m._vmem.index = index_.getId();
m._vmem.displacement = static_cast<int32_t>((intptr_t)(pAbs + disp));
m._vmem.displacement = static_cast<int32_t>((intptr_t)(p + disp));
return m;
}

File diff suppressed because it is too large Load Diff

View File

@@ -6,7 +6,7 @@
// [Export]
#define ASMJIT_EXPORTS
#define ASMJIT_EXPORTS_X86OPERAND_REGS
#define ASMJIT_EXPORTS_X86_REGS
// [Guard]
#include "../build.h"
@@ -21,24 +21,48 @@
namespace asmjit {
#define REG(_Type_, _Index_, _Size_) {{{ \
kOperandTypeReg, _Size_, { ((_Type_) << 8) + _Index_ }, kInvalidValue, {{ kInvalidVar, 0 }} \
Operand::kTypeReg, _Size_, { ((_Type_) << 8) + _Index_ }, kInvalidValue, {{ kInvalidVar, 0 }} \
}}}
const X86RegData x86RegData = {
// RIP.
REG(kX86RegTypeRip, 0, 0),
// NoGp.
REG(kInvalidReg, kInvalidReg, 0),
// Segments.
// Gpd.
{
REG(kX86RegTypeSeg, 0, 2), // Default.
REG(kX86RegTypeSeg, 1, 2), // ES.
REG(kX86RegTypeSeg, 2, 2), // CS.
REG(kX86RegTypeSeg, 3, 2), // SS.
REG(kX86RegTypeSeg, 4, 2), // DS.
REG(kX86RegTypeSeg, 5, 2), // FS.
REG(kX86RegTypeSeg, 6, 2) // GS.
REG(kX86RegTypeGpd, 0 , 4),
REG(kX86RegTypeGpd, 1 , 4),
REG(kX86RegTypeGpd, 2 , 4),
REG(kX86RegTypeGpd, 3 , 4),
REG(kX86RegTypeGpd, 4 , 4),
REG(kX86RegTypeGpd, 5 , 4),
REG(kX86RegTypeGpd, 6 , 4),
REG(kX86RegTypeGpd, 7 , 4),
REG(kX86RegTypeGpd, 8 , 4),
REG(kX86RegTypeGpd, 9 , 4),
REG(kX86RegTypeGpd, 10, 4),
REG(kX86RegTypeGpd, 11, 4),
REG(kX86RegTypeGpd, 12, 4),
REG(kX86RegTypeGpd, 13, 4),
REG(kX86RegTypeGpd, 14, 4),
REG(kX86RegTypeGpd, 15, 4)
},
// Gpq.
{
REG(kX86RegTypeGpq, 0 , 8),
REG(kX86RegTypeGpq, 1 , 8),
REG(kX86RegTypeGpq, 2 , 8),
REG(kX86RegTypeGpq, 3 , 8),
REG(kX86RegTypeGpq, 4 , 8),
REG(kX86RegTypeGpq, 5 , 8),
REG(kX86RegTypeGpq, 6 , 8),
REG(kX86RegTypeGpq, 7 , 8),
REG(kX86RegTypeGpq, 8 , 8),
REG(kX86RegTypeGpq, 9 , 8),
REG(kX86RegTypeGpq, 10, 8),
REG(kX86RegTypeGpq, 11, 8),
REG(kX86RegTypeGpq, 12, 8),
REG(kX86RegTypeGpq, 13, 8),
REG(kX86RegTypeGpq, 14, 8),
REG(kX86RegTypeGpq, 15, 8)
},
// GpbLo.
@@ -89,82 +113,6 @@ const X86RegData x86RegData = {
REG(kX86RegTypeGpw, 15, 2)
},
// Gpd.
{
REG(kX86RegTypeGpd, 0, 4),
REG(kX86RegTypeGpd, 1, 4),
REG(kX86RegTypeGpd, 2, 4),
REG(kX86RegTypeGpd, 3, 4),
REG(kX86RegTypeGpd, 4, 4),
REG(kX86RegTypeGpd, 5, 4),
REG(kX86RegTypeGpd, 6, 4),
REG(kX86RegTypeGpd, 7, 4),
REG(kX86RegTypeGpd, 8, 4),
REG(kX86RegTypeGpd, 9, 4),
REG(kX86RegTypeGpd, 10, 4),
REG(kX86RegTypeGpd, 11, 4),
REG(kX86RegTypeGpd, 12, 4),
REG(kX86RegTypeGpd, 13, 4),
REG(kX86RegTypeGpd, 14, 4),
REG(kX86RegTypeGpd, 15, 4)
},
// Gpq.
{
REG(kX86RegTypeGpq, 0, 8),
REG(kX86RegTypeGpq, 1, 8),
REG(kX86RegTypeGpq, 2, 8),
REG(kX86RegTypeGpq, 3, 8),
REG(kX86RegTypeGpq, 4, 8),
REG(kX86RegTypeGpq, 5, 8),
REG(kX86RegTypeGpq, 6, 8),
REG(kX86RegTypeGpq, 7, 8),
REG(kX86RegTypeGpq, 8, 8),
REG(kX86RegTypeGpq, 9, 8),
REG(kX86RegTypeGpq, 10, 8),
REG(kX86RegTypeGpq, 11, 8),
REG(kX86RegTypeGpq, 12, 8),
REG(kX86RegTypeGpq, 13, 8),
REG(kX86RegTypeGpq, 14, 8),
REG(kX86RegTypeGpq, 15, 8)
},
// Fp.
{
REG(kX86RegTypeFp, 0, 10),
REG(kX86RegTypeFp, 1, 10),
REG(kX86RegTypeFp, 2, 10),
REG(kX86RegTypeFp, 3, 10),
REG(kX86RegTypeFp, 4, 10),
REG(kX86RegTypeFp, 5, 10),
REG(kX86RegTypeFp, 6, 10),
REG(kX86RegTypeFp, 7, 10)
},
// Mm.
{
REG(kX86RegTypeMm, 0, 8),
REG(kX86RegTypeMm, 1, 8),
REG(kX86RegTypeMm, 2, 8),
REG(kX86RegTypeMm, 3, 8),
REG(kX86RegTypeMm, 4, 8),
REG(kX86RegTypeMm, 5, 8),
REG(kX86RegTypeMm, 6, 8),
REG(kX86RegTypeMm, 7, 8)
},
// K.
{
REG(kX86RegTypeK, 0, 8),
REG(kX86RegTypeK, 1, 8),
REG(kX86RegTypeK, 2, 8),
REG(kX86RegTypeK, 3, 8),
REG(kX86RegTypeK, 4, 8),
REG(kX86RegTypeK, 5, 8),
REG(kX86RegTypeK, 6, 8),
REG(kX86RegTypeK, 7, 8)
},
// Xmm.
{
REG(kX86RegTypeXmm, 0 , 16),
@@ -271,7 +219,59 @@ const X86RegData x86RegData = {
REG(kX86RegTypeZmm, 29, 64),
REG(kX86RegTypeZmm, 30, 64),
REG(kX86RegTypeZmm, 31, 64)
}
},
// K.
{
REG(kX86RegTypeK, 0, 8),
REG(kX86RegTypeK, 1, 8),
REG(kX86RegTypeK, 2, 8),
REG(kX86RegTypeK, 3, 8),
REG(kX86RegTypeK, 4, 8),
REG(kX86RegTypeK, 5, 8),
REG(kX86RegTypeK, 6, 8),
REG(kX86RegTypeK, 7, 8)
},
// Fp.
{
REG(kX86RegTypeFp, 0, 10),
REG(kX86RegTypeFp, 1, 10),
REG(kX86RegTypeFp, 2, 10),
REG(kX86RegTypeFp, 3, 10),
REG(kX86RegTypeFp, 4, 10),
REG(kX86RegTypeFp, 5, 10),
REG(kX86RegTypeFp, 6, 10),
REG(kX86RegTypeFp, 7, 10)
},
// Mm.
{
REG(kX86RegTypeMm, 0, 8),
REG(kX86RegTypeMm, 1, 8),
REG(kX86RegTypeMm, 2, 8),
REG(kX86RegTypeMm, 3, 8),
REG(kX86RegTypeMm, 4, 8),
REG(kX86RegTypeMm, 5, 8),
REG(kX86RegTypeMm, 6, 8),
REG(kX86RegTypeMm, 7, 8)
},
// Segments.
{
REG(kX86RegTypeSeg, 0, 2), // Default.
REG(kX86RegTypeSeg, 1, 2), // ES.
REG(kX86RegTypeSeg, 2, 2), // CS.
REG(kX86RegTypeSeg, 3, 2), // SS.
REG(kX86RegTypeSeg, 4, 2), // DS.
REG(kX86RegTypeSeg, 5, 2), // FS.
REG(kX86RegTypeSeg, 6, 2) // GS.
},
// NoGp.
REG(kInvalidReg, kInvalidReg, 0),
// RIP.
REG(kX86RegTypeRip, 0, 0),
};
#undef REG

View File

@@ -1,94 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Export]
#define ASMJIT_EXPORTS
// [Guard]
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64))
// [Dependencies - AsmJit]
#include "../base/containers.h"
#include "../x86/x86scheduler_p.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [Internals]
// ============================================================================
//! \internal
//!
//! Per-instruction scheduling metadata. One record describes everything the
//! scheduler needs to know about a single `HLInst`: which registers and CPU
//! flags it reads/writes (to derive dependencies), its cost model (uops,
//! latency, execution ports), and the dependency-graph links to other
//! instructions.
struct X86ScheduleData {
//! Registers read by the instruction.
X86RegMask regsIn;
//! Registers written by the instruction.
X86RegMask regsOut;
//! Flags read by the instruction.
uint8_t flagsIn;
//! Flags written by the instruction.
uint8_t flagsOut;
//! How many `uops` or `cycles` the instruction takes.
uint8_t ops;
//! Instruction latency.
uint8_t latency;
//! Which ports the instruction can run at.
uint16_t ports;
//! \internal
uint16_t reserved;
//! All instructions that this instruction depends on.
PodList<HLInst*>::Link* dependsOn;
//! All instructions that use the result of this instruction.
PodList<HLInst*>::Link* usedBy;
};
// ============================================================================
// [asmjit::X86Scheduler - Construction / Destruction]
// ============================================================================
//! Create a scheduler bound to `compiler`; `cpuInfo` supplies the CPU model
//! (latencies, ports) the scheduler would consult. Neither pointer is owned.
X86Scheduler::X86Scheduler(X86Compiler* compiler, const X86CpuInfo* cpuInfo) :
_compiler(compiler),
_cpuInfo(cpuInfo) {}
// No owned resources, nothing to release.
X86Scheduler::~X86Scheduler() {}
// ============================================================================
// [asmjit::X86Scheduler - Run]
// ============================================================================
//! Schedule (reorder) the instruction stream in the node range
//! `[start, stop)`.
//!
//! NOTE: Currently a non-functional stub - it performs no reordering and
//! always returns `kErrorOk`. The commented-out code below only traces the
//! instruction stream for debugging and asserts that every node in the range
//! is an `HLInst`.
Error X86Scheduler::run(HLNode* start, HLNode* stop) {
/*
ASMJIT_TLOG("[Schedule] === Begin ===");
Zone zone(8096 - Zone::kZoneOverhead);
HLNode* node_ = start;
while (node_ != stop) {
HLNode* next = node_->getNext();
ASMJIT_ASSERT(node_->getType() == kHLNodeTypeInst);
printf(" %s\n", X86Util::getInstInfo(static_cast<HLInst*>(node_)->getInstId()).getInstName());
node_ = next;
}
ASMJIT_TLOG("[Schedule] === End ===");
*/
return kErrorOk;
}
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER && (ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64)

View File

@@ -1,63 +0,0 @@
// [AsmJit]
// Complete x86/x64 JIT and Remote Assembler for C++.
//
// [License]
// Zlib - See LICENSE.md file in the package.
// [Guard]
#ifndef _ASMJIT_X86_X86SCHEDULER_P_H
#define _ASMJIT_X86_X86SCHEDULER_P_H
#include "../build.h"
#if !defined(ASMJIT_DISABLE_COMPILER)
// [Dependencies - AsmJit]
#include "../x86/x86compiler.h"
#include "../x86/x86compilercontext_p.h"
#include "../x86/x86cpuinfo.h"
#include "../x86/x86inst.h"
// [Api-Begin]
#include "../apibegin.h"
namespace asmjit {
// ============================================================================
// [asmjit::X86Scheduler]
// ============================================================================
//! \internal
//!
//! X86 instruction scheduler.
//!
//! Wraps the (currently stub) scheduling pass over an `X86Compiler`'s node
//! list. Holds non-owning pointers to the compiler it operates on and to the
//! CPU description that would guide scheduling decisions.
struct X86Scheduler {
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! Bind the scheduler to `compiler`, consulting `cpuInfo` for CPU details.
X86Scheduler(X86Compiler* compiler, const X86CpuInfo* cpuInfo);
~X86Scheduler();
// --------------------------------------------------------------------------
// [Run]
// --------------------------------------------------------------------------
//! Schedule the node range `[start, stop)`; see the implementation note -
//! this is presently a no-op that returns `kErrorOk`.
Error run(HLNode* start, HLNode* stop);
// --------------------------------------------------------------------------
// [Members]
// --------------------------------------------------------------------------
//! Attached compiler (not owned).
X86Compiler* _compiler;
//! CPU information used for scheduling (not owned).
const X86CpuInfo* _cpuInfo;
};
} // asmjit namespace
// [Api-End]
#include "../apiend.h"
// [Guard]
#endif // !ASMJIT_DISABLE_COMPILER
#endif // _ASMJIT_X86_X86SCHEDULER_P_H

View File

@@ -16,6 +16,68 @@
#include <stdlib.h>
#include <string.h>
// ============================================================================
// [Configuration]
// ============================================================================
static const uint32_t kNumRepeats = 10;
static const uint32_t kNumIterations = 5000;
// ============================================================================
// [TestRuntime]
// ============================================================================
// A minimal benchmark Runtime. Unlike JitRuntime it places relocated code
// into plain heap memory obtained with malloc(), which is enough for
// measuring code-generation speed. NOTE(review): malloc'd memory is not
// executable on most platforms - presumably the benchmark never runs the
// generated code; confirm before reusing this class elsewhere.
struct TestRuntime : public asmjit::Runtime {
ASMJIT_NO_COPY(TestRuntime)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
// Configure the runtime for a target `arch`; both the cdecl and stdcall
// slots are set to the same `callConv` so every function build uses it.
TestRuntime(uint32_t arch, uint32_t callConv) ASMJIT_NOEXCEPT {
_cpuInfo.setArch(arch);
_stackAlignment = 16;
_baseAddress = 0;
_cdeclConv = static_cast<uint8_t>(callConv);
_stdCallConv = static_cast<uint8_t>(callConv);
}
virtual ~TestRuntime() ASMJIT_NOEXCEPT {}
// --------------------------------------------------------------------------
// [Interface]
// --------------------------------------------------------------------------
// Relocate the assembled code into a fresh malloc'd buffer and store it in
// `*dst`. On every failure path `*dst` is set to NULL and a specific error
// code is returned; the buffer is freed if relocation fails.
virtual asmjit::Error add(void** dst, asmjit::Assembler* assembler) ASMJIT_NOEXCEPT {
size_t codeSize = assembler->getCodeSize();
if (codeSize == 0) {
*dst = NULL;
return asmjit::kErrorNoCodeGenerated;
}
void* p = ::malloc(codeSize);
if (p == NULL) {
*dst = NULL;
return asmjit::kErrorNoHeapMemory;
}
size_t relocSize = assembler->relocCode(p, _baseAddress);
if (relocSize == 0) {
::free(p);
*dst = NULL;
return asmjit::kErrorInvalidState;
}
*dst = p;
return asmjit::kErrorOk;
}
// Release a buffer previously returned by add(). free(NULL) is a no-op, so
// passing a pointer from a failed add() is safe.
virtual asmjit::Error release(void* p) ASMJIT_NOEXCEPT {
::free(p);
return asmjit::kErrorOk;
}
};
// ============================================================================
// [Performance]
// ============================================================================
@@ -49,34 +111,39 @@ struct Performance {
uint32_t best;
};
//! Convert a measured duration and output size into throughput.
//!
//! \param time Elapsed time in milliseconds.
//! \param outputSize Total number of bytes emitted during `time`.
//! \return Throughput in MB/s (1 MB = 1024*1024 bytes), or 0.0 when `time`
//! is zero - a fast run can complete below the timer's resolution, and
//! dividing by zero would otherwise yield +inf in the report.
static double mbps(uint32_t time, size_t outputSize) {
  if (time == 0)
    return 0.0;
  double bytesTotal = static_cast<double>(outputSize);
  return (bytesTotal * 1000) / (static_cast<double>(time) * 1024 * 1024);
}
// ============================================================================
// [Main]
// ============================================================================
//! Compute instruction throughput from a benchmark run.
//!
//! \param time Elapsed time in milliseconds.
//! \param numIterations Number of benchmark iterations executed.
//! \param instPerIteration Instructions emitted per iteration.
//! \return Emitted instructions per second, or 0 when `time` is zero - a
//! fast run can complete below the timer's resolution and integer division
//! by zero is undefined behavior. The product is computed in 64 bits to
//! avoid 32-bit overflow before the division.
static uint32_t instPerMs(uint32_t time, uint32_t numIterations, uint32_t instPerIteration) {
  if (time == 0)
    return 0;
  return static_cast<uint32_t>(
    static_cast<uint64_t>(numIterations) * instPerIteration * 1000 / time);
}
int main(int argc, char* argv[]) {
#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)
static void benchX86(uint32_t arch, uint32_t callConv) {
using namespace asmjit;
Performance perf;
uint32_t kNumRepeats = 10;
uint32_t kNumIterations = 10000;
TestRuntime runtime(arch, callConv);
JitRuntime runtime;
X86Assembler a(&runtime);
X86Assembler a(&runtime, arch);
X86Compiler c;
uint32_t r, i;
const char* archName = arch == kArchX86 ? "X86" : "X64";
// --------------------------------------------------------------------------
// [Bench - Opcode]
// --------------------------------------------------------------------------
size_t asmOutputSize = 0;
size_t cmpOutputSize = 0;
perf.reset();
for (r = 0; r < kNumRepeats; r++) {
asmOutputSize = 0;
perf.start();
for (i = 0; i < kNumIterations; i++) {
asmgen::opcode(a);
@@ -84,14 +151,14 @@ int main(int argc, char* argv[]) {
void *p = a.make();
runtime.release(p);
asmOutputSize += a.getCodeSize();
a.reset();
}
perf.end();
}
printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
"Assembler [GenOpCode]",
perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenOpCodeInstCount));
printf("%-12s (%s) | Time: %-6u [ms] | Speed: %7.3f [MB/s]\n",
"X86Assembler", archName, perf.best, mbps(perf.best, asmOutputSize));
// --------------------------------------------------------------------------
// [Bench - Blend]
@@ -99,6 +166,7 @@ int main(int argc, char* argv[]) {
perf.reset();
for (r = 0; r < kNumRepeats; r++) {
cmpOutputSize = 0;
perf.start();
for (i = 0; i < kNumIterations; i++) {
c.attach(&a);
@@ -108,14 +176,24 @@ int main(int argc, char* argv[]) {
void* p = a.make();
runtime.release(p);
cmpOutputSize += a.getCodeSize();
a.reset();
}
perf.end();
}
printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n",
"Compiler [GenBlend]",
perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenBlendInstCount));
printf("%-12s (%s) | Time: %-6u [ms] | Speed: %7.3f [MB/s]\n",
"X86Compiler", archName, perf.best, mbps(perf.best, cmpOutputSize));
}
#endif
// Benchmark entry point: runs the X86/X64 benchmark suite once per backend
// that was compiled in. Each call fixes the architecture and the calling
// convention used by the generated test functions.
int main(int argc, char* argv[]) {
#if defined(ASMJIT_BUILD_X86)
benchX86(asmjit::kArchX86, asmjit::kCallConvX86CDecl);
#endif
#if defined(ASMJIT_BUILD_X64)
benchX86(asmjit::kArchX64, asmjit::kCallConvX64Unix);
#endif
return 0;
}

View File

@@ -32,14 +32,15 @@ static const char* archIdToString(uint32_t archId) {
case asmjit::kArchNone : return "None";
case asmjit::kArchX86 : return "X86";
case asmjit::kArchX64 : return "X64";
case asmjit::kArchArm: return "ARM";
case asmjit::kArchArm32: return "ARM32";
case asmjit::kArchArm64: return "ARM64";
default: return "<unknown>";
}
}
int main(int argc, char* argv[]) {
asmjit::FileLogger logger(stdout);
logger.setOption(asmjit::kLoggerOptionBinaryForm, true);
logger.addOptions(asmjit::Logger::kOptionBinaryForm);
OpcodeDumpInfo infoList[] = {
# if defined(ASMJIT_BUILD_X86)

View File

@@ -13,8 +13,6 @@
namespace asmgen {
enum { kGenOpCodeInstCount = 2690 };
// Generate all instructions asmjit can emit.
static void opcode(asmjit::X86Assembler& a, bool useRex1 = false, bool useRex2 = false) {
using namespace asmjit;

View File

@@ -6,8 +6,6 @@
// [Dependencies - AsmJit]
#include "../asmjit/asmjit.h"
#include "../asmjit/base/compilercontext_p.h"
#include "../asmjit/x86/x86compilercontext_p.h"
// ============================================================================
// [DumpCpu]
@@ -18,101 +16,129 @@ struct DumpCpuFeature {
const char* name;
};
static void dumpCpuFeatures(const asmjit::CpuInfo* cpuInfo, const DumpCpuFeature* data, size_t count) {
static void dumpCpuFeatures(const asmjit::CpuInfo& cpu, const DumpCpuFeature* data, size_t count) {
for (size_t i = 0; i < count; i++)
if (cpuInfo->hasFeature(data[i].feature))
if (cpu.hasFeature(data[i].feature))
INFO(" %s", data[i].name);
}
static void dumpCpu(void) {
const asmjit::CpuInfo* cpu = asmjit::CpuInfo::getHost();
const asmjit::CpuInfo& cpu = asmjit::CpuInfo::getHost();
INFO("Host CPU Info:");
INFO(" Vendor string : %s", cpu->getVendorString());
INFO(" Brand string : %s", cpu->getBrandString());
INFO(" Family : %u", cpu->getFamily());
INFO(" Model : %u", cpu->getModel());
INFO(" Stepping : %u", cpu->getStepping());
INFO(" HW-Threads Count : %u", cpu->getHwThreadsCount());
INFO("Host CPU:");
INFO(" Vendor string : %s", cpu.getVendorString());
INFO(" Brand string : %s", cpu.getBrandString());
INFO(" Family : %u", cpu.getFamily());
INFO(" Model : %u", cpu.getModel());
INFO(" Stepping : %u", cpu.getStepping());
INFO(" HW-Threads Count : %u", cpu.getHwThreadsCount());
INFO("");
// --------------------------------------------------------------------------
// [X86]
// [ARM / ARM64]
// --------------------------------------------------------------------------
#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64
static const DumpCpuFeature armFeaturesList[] = {
{ asmjit::CpuInfo::kArmFeatureV6 , "ARMv6" },
{ asmjit::CpuInfo::kArmFeatureV7 , "ARMv7" },
{ asmjit::CpuInfo::kArmFeatureV8 , "ARMv8" },
{ asmjit::CpuInfo::kArmFeatureTHUMB , "THUMB" },
{ asmjit::CpuInfo::kArmFeatureTHUMB2 , "THUMBv2" },
{ asmjit::CpuInfo::kArmFeatureVFP2 , "VFPv2" },
{ asmjit::CpuInfo::kArmFeatureVFP3 , "VFPv3" },
{ asmjit::CpuInfo::kArmFeatureVFP4 , "VFPv4" },
{ asmjit::CpuInfo::kArmFeatureVFP_D32 , "VFP D32" },
{ asmjit::CpuInfo::kArmFeatureNEON , "NEON" },
{ asmjit::CpuInfo::kArmFeatureDSP , "DSP" },
{ asmjit::CpuInfo::kArmFeatureIDIV , "IDIV" },
{ asmjit::CpuInfo::kArmFeatureAES , "AES" },
{ asmjit::CpuInfo::kArmFeatureCRC32 , "CRC32" },
{ asmjit::CpuInfo::kArmFeatureSHA1 , "SHA1" },
{ asmjit::CpuInfo::kArmFeatureSHA256 , "SHA256" },
{ asmjit::CpuInfo::kArmFeatureAtomics64 , "64-bit atomics" }
};
INFO("ARM Features:");
dumpCpuFeatures(cpu, armFeaturesList, ASMJIT_ARRAY_SIZE(armFeaturesList));
INFO("");
#endif
// --------------------------------------------------------------------------
// [X86 / X64]
// --------------------------------------------------------------------------
#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64
const asmjit::X86CpuInfo* x86Cpu = static_cast<const asmjit::X86CpuInfo*>(cpu);
static const DumpCpuFeature x86FeaturesList[] = {
{ asmjit::kX86CpuFeatureNX , "NX (Non-Execute Bit)" },
{ asmjit::kX86CpuFeatureMT , "MT (Multi-Threading)" },
{ asmjit::kX86CpuFeatureRDTSC , "RDTSC" },
{ asmjit::kX86CpuFeatureRDTSCP , "RDTSCP" },
{ asmjit::kX86CpuFeatureCMOV , "CMOV" },
{ asmjit::kX86CpuFeatureCMPXCHG8B , "CMPXCHG8B" },
{ asmjit::kX86CpuFeatureCMPXCHG16B , "CMPXCHG16B" },
{ asmjit::kX86CpuFeatureCLFLUSH , "CLFLUSH" },
{ asmjit::kX86CpuFeatureCLFLUSH_OPT , "CLFLUSH (Opt)" },
{ asmjit::kX86CpuFeaturePREFETCH , "PREFETCH" },
{ asmjit::kX86CpuFeaturePREFETCHWT1 , "PREFETCHWT1" },
{ asmjit::kX86CpuFeatureLahfSahf , "LAHF/SAHF" },
{ asmjit::kX86CpuFeatureFXSR , "FXSR" },
{ asmjit::kX86CpuFeatureFXSR_OPT , "FXSR (Opt)" },
{ asmjit::kX86CpuFeatureMMX , "MMX" },
{ asmjit::kX86CpuFeatureMMX2 , "MMX2" },
{ asmjit::kX86CpuFeature3DNOW , "3DNOW" },
{ asmjit::kX86CpuFeature3DNOW2 , "3DNOW2" },
{ asmjit::kX86CpuFeatureSSE , "SSE" },
{ asmjit::kX86CpuFeatureSSE2 , "SSE2" },
{ asmjit::kX86CpuFeatureSSE3 , "SSE3" },
{ asmjit::kX86CpuFeatureSSSE3 , "SSSE3" },
{ asmjit::kX86CpuFeatureSSE4A , "SSE4A" },
{ asmjit::kX86CpuFeatureSSE4_1 , "SSE4.1" },
{ asmjit::kX86CpuFeatureSSE4_2 , "SSE4.2" },
{ asmjit::kX86CpuFeatureMSSE , "Misaligned SSE" },
{ asmjit::kX86CpuFeatureMONITOR , "MONITOR/MWAIT" },
{ asmjit::kX86CpuFeatureMOVBE , "MOVBE" },
{ asmjit::kX86CpuFeaturePOPCNT , "POPCNT" },
{ asmjit::kX86CpuFeatureLZCNT , "LZCNT" },
{ asmjit::kX86CpuFeatureAESNI , "AESNI" },
{ asmjit::kX86CpuFeaturePCLMULQDQ , "PCLMULQDQ" },
{ asmjit::kX86CpuFeatureRDRAND , "RDRAND" },
{ asmjit::kX86CpuFeatureRDSEED , "RDSEED" },
{ asmjit::kX86CpuFeatureSHA , "SHA" },
{ asmjit::kX86CpuFeatureXSAVE , "XSAVE" },
{ asmjit::kX86CpuFeatureXSAVE_OS , "XSAVE (OS)" },
{ asmjit::kX86CpuFeatureAVX , "AVX" },
{ asmjit::kX86CpuFeatureAVX2 , "AVX2" },
{ asmjit::kX86CpuFeatureF16C , "F16C" },
{ asmjit::kX86CpuFeatureFMA3 , "FMA3" },
{ asmjit::kX86CpuFeatureFMA4 , "FMA4" },
{ asmjit::kX86CpuFeatureXOP , "XOP" },
{ asmjit::kX86CpuFeatureBMI , "BMI" },
{ asmjit::kX86CpuFeatureBMI2 , "BMI2" },
{ asmjit::kX86CpuFeatureHLE , "HLE" },
{ asmjit::kX86CpuFeatureRTM , "RTM" },
{ asmjit::kX86CpuFeatureADX , "ADX" },
{ asmjit::kX86CpuFeatureMPX , "MPX" },
{ asmjit::kX86CpuFeatureFSGSBASE , "FS/GS Base" },
{ asmjit::kX86CpuFeatureMOVSBSTOSB_OPT, "REP MOVSB/STOSB (Opt)" },
{ asmjit::kX86CpuFeatureAVX512F , "AVX512F" },
{ asmjit::kX86CpuFeatureAVX512CD , "AVX512CD" },
{ asmjit::kX86CpuFeatureAVX512PF , "AVX512PF" },
{ asmjit::kX86CpuFeatureAVX512ER , "AVX512ER" },
{ asmjit::kX86CpuFeatureAVX512DQ , "AVX512DQ" },
{ asmjit::kX86CpuFeatureAVX512BW , "AVX512BW" },
{ asmjit::kX86CpuFeatureAVX512VL , "AVX512VL" }
{ asmjit::CpuInfo::kX86FeatureNX , "NX (Non-Execute Bit)" },
{ asmjit::CpuInfo::kX86FeatureMT , "MT (Multi-Threading)" },
{ asmjit::CpuInfo::kX86FeatureRDTSC , "RDTSC" },
{ asmjit::CpuInfo::kX86FeatureRDTSCP , "RDTSCP" },
{ asmjit::CpuInfo::kX86FeatureCMOV , "CMOV" },
{ asmjit::CpuInfo::kX86FeatureCMPXCHG8B , "CMPXCHG8B" },
{ asmjit::CpuInfo::kX86FeatureCMPXCHG16B , "CMPXCHG16B" },
{ asmjit::CpuInfo::kX86FeatureCLFLUSH , "CLFLUSH" },
{ asmjit::CpuInfo::kX86FeatureCLFLUSH_OPT , "CLFLUSH (Opt)" },
{ asmjit::CpuInfo::kX86FeaturePREFETCH , "PREFETCH" },
{ asmjit::CpuInfo::kX86FeaturePREFETCHWT1 , "PREFETCHWT1" },
{ asmjit::CpuInfo::kX86FeatureLAHF_SAHF , "LAHF/SAHF" },
{ asmjit::CpuInfo::kX86FeatureFXSR , "FXSR" },
{ asmjit::CpuInfo::kX86FeatureFXSR_OPT , "FXSR (Opt)" },
{ asmjit::CpuInfo::kX86FeatureMMX , "MMX" },
{ asmjit::CpuInfo::kX86FeatureMMX2 , "MMX2" },
{ asmjit::CpuInfo::kX86Feature3DNOW , "3DNOW" },
{ asmjit::CpuInfo::kX86Feature3DNOW2 , "3DNOW2" },
{ asmjit::CpuInfo::kX86FeatureSSE , "SSE" },
{ asmjit::CpuInfo::kX86FeatureSSE2 , "SSE2" },
{ asmjit::CpuInfo::kX86FeatureSSE3 , "SSE3" },
{ asmjit::CpuInfo::kX86FeatureSSSE3 , "SSSE3" },
{ asmjit::CpuInfo::kX86FeatureSSE4A , "SSE4A" },
{ asmjit::CpuInfo::kX86FeatureSSE4_1 , "SSE4.1" },
{ asmjit::CpuInfo::kX86FeatureSSE4_2 , "SSE4.2" },
{ asmjit::CpuInfo::kX86FeatureMSSE , "Misaligned SSE" },
{ asmjit::CpuInfo::kX86FeatureMONITOR , "MONITOR/MWAIT" },
{ asmjit::CpuInfo::kX86FeatureMOVBE , "MOVBE" },
{ asmjit::CpuInfo::kX86FeaturePOPCNT , "POPCNT" },
{ asmjit::CpuInfo::kX86FeatureLZCNT , "LZCNT" },
{ asmjit::CpuInfo::kX86FeatureAESNI , "AESNI" },
{ asmjit::CpuInfo::kX86FeaturePCLMULQDQ , "PCLMULQDQ" },
{ asmjit::CpuInfo::kX86FeatureRDRAND , "RDRAND" },
{ asmjit::CpuInfo::kX86FeatureRDSEED , "RDSEED" },
{ asmjit::CpuInfo::kX86FeatureSHA , "SHA" },
{ asmjit::CpuInfo::kX86FeatureXSAVE , "XSAVE" },
{ asmjit::CpuInfo::kX86FeatureXSAVE_OS , "XSAVE (OS)" },
{ asmjit::CpuInfo::kX86FeatureAVX , "AVX" },
{ asmjit::CpuInfo::kX86FeatureAVX2 , "AVX2" },
{ asmjit::CpuInfo::kX86FeatureF16C , "F16C" },
{ asmjit::CpuInfo::kX86FeatureFMA3 , "FMA3" },
{ asmjit::CpuInfo::kX86FeatureFMA4 , "FMA4" },
{ asmjit::CpuInfo::kX86FeatureXOP , "XOP" },
{ asmjit::CpuInfo::kX86FeatureBMI , "BMI" },
{ asmjit::CpuInfo::kX86FeatureBMI2 , "BMI2" },
{ asmjit::CpuInfo::kX86FeatureHLE , "HLE" },
{ asmjit::CpuInfo::kX86FeatureRTM , "RTM" },
{ asmjit::CpuInfo::kX86FeatureADX , "ADX" },
{ asmjit::CpuInfo::kX86FeatureMPX , "MPX" },
{ asmjit::CpuInfo::kX86FeatureFSGSBASE , "FS/GS Base" },
{ asmjit::CpuInfo::kX86FeatureMOVSBSTOSB_OPT, "REP MOVSB/STOSB (Opt)" },
{ asmjit::CpuInfo::kX86FeatureAVX512F , "AVX512F" },
{ asmjit::CpuInfo::kX86FeatureAVX512CD , "AVX512CD" },
{ asmjit::CpuInfo::kX86FeatureAVX512PF , "AVX512PF" },
{ asmjit::CpuInfo::kX86FeatureAVX512ER , "AVX512ER" },
{ asmjit::CpuInfo::kX86FeatureAVX512DQ , "AVX512DQ" },
{ asmjit::CpuInfo::kX86FeatureAVX512BW , "AVX512BW" },
{ asmjit::CpuInfo::kX86FeatureAVX512VL , "AVX512VL" }
};
INFO("Host CPU Info (X86/X64):");
INFO(" Processor Type : %u", x86Cpu->getProcessorType());
INFO(" Brand Index : %u", x86Cpu->getBrandIndex());
INFO(" CL Flush Cache Line : %u", x86Cpu->getFlushCacheLineSize());
INFO(" Max logical Processors : %u", x86Cpu->getMaxLogicalProcessors());
INFO("X86 Specific:");
INFO(" Processor Type : %u", cpu.getX86ProcessorType());
INFO(" Brand Index : %u", cpu.getX86BrandIndex());
INFO(" CL Flush Cache Line : %u", cpu.getX86FlushCacheLineSize());
INFO(" Max logical Processors : %u", cpu.getX86MaxLogicalProcessors());
INFO("");
INFO("Host CPU Features (X86/X64):");
dumpCpuFeatures(x86Cpu, x86FeaturesList, ASMJIT_ARRAY_SIZE(x86FeaturesList));
INFO("X86 Features:");
dumpCpuFeatures(cpu, x86FeaturesList, ASMJIT_ARRAY_SIZE(x86FeaturesList));
INFO("");
#endif
}
@@ -137,14 +163,17 @@ static void dumpSizeOf(void) {
DUMP_TYPE(float);
DUMP_TYPE(double);
DUMP_TYPE(void*);
DUMP_TYPE(asmjit::Ptr);
DUMP_TYPE(asmjit::SignedPtr);
INFO("");
INFO("SizeOf Base:");
DUMP_TYPE(asmjit::Assembler);
DUMP_TYPE(asmjit::ConstPool);
DUMP_TYPE(asmjit::LabelData);
DUMP_TYPE(asmjit::RelocData);
DUMP_TYPE(asmjit::Runtime);
DUMP_TYPE(asmjit::Zone);
DUMP_TYPE(asmjit::Ptr);
DUMP_TYPE(asmjit::SignedPtr);
INFO("");
INFO("SizeOf Operand:");
@@ -156,19 +185,9 @@ static void dumpSizeOf(void) {
DUMP_TYPE(asmjit::Label);
INFO("");
INFO("SizeOf Assembler:");
DUMP_TYPE(asmjit::Assembler);
DUMP_TYPE(asmjit::LabelData);
DUMP_TYPE(asmjit::RelocData);
INFO("");
#if !defined(ASMJIT_DISABLE_COMPILER)
INFO("SizeOf Compiler:");
DUMP_TYPE(asmjit::Compiler);
DUMP_TYPE(asmjit::VarMap);
DUMP_TYPE(asmjit::VarAttr);
DUMP_TYPE(asmjit::VarData);
DUMP_TYPE(asmjit::VarState);
DUMP_TYPE(asmjit::HLNode);
DUMP_TYPE(asmjit::HLInst);
DUMP_TYPE(asmjit::HLJump);
@@ -197,9 +216,6 @@ static void dumpSizeOf(void) {
#if !defined(ASMJIT_DISABLE_COMPILER)
DUMP_TYPE(asmjit::X86Compiler);
DUMP_TYPE(asmjit::X86VarMap);
DUMP_TYPE(asmjit::X86VarInfo);
DUMP_TYPE(asmjit::X86VarState);
DUMP_TYPE(asmjit::X86CallNode);
DUMP_TYPE(asmjit::X86FuncNode);
DUMP_TYPE(asmjit::X86FuncDecl);

View File

@@ -189,9 +189,9 @@ struct X86Test_JumpCross : public X86Test {
virtual void compile(X86Compiler& c) {
c.addFunc(FuncBuilder0<Void>(kCallConvHost));
Label L_1(c);
Label L_2(c);
Label L_3(c);
Label L_1 = c.newLabel();
Label L_2 = c.newLabel();
Label L_3 = c.newLabel();
c.jmp(L_2);
@@ -230,13 +230,13 @@ struct X86Test_JumpUnreachable1 : public X86Test {
virtual void compile(X86Compiler& c) {
c.addFunc(FuncBuilder0<Void>(kCallConvHost));
Label L_1(c);
Label L_2(c);
Label L_3(c);
Label L_4(c);
Label L_5(c);
Label L_6(c);
Label L_7(c);
Label L_1 = c.newLabel();
Label L_2 = c.newLabel();
Label L_3 = c.newLabel();
Label L_4 = c.newLabel();
Label L_5 = c.newLabel();
Label L_6 = c.newLabel();
Label L_7 = c.newLabel();
X86GpVar v0 = c.newUInt32("v0");
X86GpVar v1 = c.newUInt32("v1");
@@ -292,8 +292,8 @@ struct X86Test_JumpUnreachable2 : public X86Test {
virtual void compile(X86Compiler& c) {
c.addFunc(FuncBuilder0<Void>(kCallConvHost));
Label L_1(c);
Label L_2(c);
Label L_1 = c.newLabel();
Label L_2 = c.newLabel();
X86GpVar v0 = c.newUInt32("v0");
X86GpVar v1 = c.newUInt32("v1");
@@ -397,7 +397,7 @@ struct X86Test_AllocManual : public X86Test {
c.spill(v0);
c.spill(v1);
Label L(c);
Label L = c.newLabel();
c.mov(cnt, 32);
c.bind(L);
@@ -449,7 +449,7 @@ struct X86Test_AllocUseMem : public X86Test {
X86GpVar aIdx = c.newInt32("aIdx");
X86GpVar aEnd = c.newInt32("aEnd");
Label L_1(c);
Label L_1 = c.newLabel();
c.setArg(0, aIdx);
c.setArg(1, aEnd);
@@ -589,7 +589,7 @@ struct X86Test_AllocMany2 : public X86Test {
}
X86GpVar v0 = c.newInt32("v0");
Label L(c);
Label L = c.newLabel();
c.mov(v0, 32);
c.bind(L);
@@ -1034,8 +1034,8 @@ struct X86Test_AllocIfElse1 : public X86Test {
X86GpVar v1 = c.newInt32("v1");
X86GpVar v2 = c.newInt32("v2");
Label L_1(c);
Label L_2(c);
Label L_1 = c.newLabel();
Label L_2 = c.newLabel();
c.setArg(0, v1);
c.setArg(1, v2);
@@ -1085,10 +1085,10 @@ struct X86Test_AllocIfElse2 : public X86Test {
X86GpVar v1 = c.newInt32("v1");
X86GpVar v2 = c.newInt32("v2");
Label L_1(c);
Label L_2(c);
Label L_3(c);
Label L_4(c);
Label L_1 = c.newLabel();
Label L_2 = c.newLabel();
Label L_3 = c.newLabel();
Label L_4 = c.newLabel();
c.setArg(0, v1);
c.setArg(1, v2);
@@ -1146,9 +1146,9 @@ struct X86Test_AllocIfElse3 : public X86Test {
X86GpVar v2 = c.newInt32("v2");
X86GpVar counter = c.newInt32("counter");
Label L_1(c);
Label L_Loop(c);
Label L_Exit(c);
Label L_1 = c.newLabel();
Label L_Loop = c.newLabel();
Label L_Exit = c.newLabel();
c.setArg(0, v1);
c.setArg(1, v2);
@@ -1206,10 +1206,10 @@ struct X86Test_AllocIfElse4 : public X86Test {
X86GpVar v2 = c.newInt32("v2");
X86GpVar counter = c.newInt32("counter");
Label L_1(c);
Label L_Loop1(c);
Label L_Loop2(c);
Label L_Exit(c);
Label L_1 = c.newLabel();
Label L_Loop1 = c.newLabel();
Label L_Loop2 = c.newLabel();
Label L_Exit = c.newLabel();
c.mov(counter, 0);
@@ -1556,8 +1556,8 @@ struct X86Test_AllocStack : public X86Test {
X86GpVar a = c.newInt32("a");
X86GpVar b = c.newInt32("b");
Label L_1(c);
Label L_2(c);
Label L_1 = c.newLabel();
Label L_2 = c.newLabel();
// Fill stack by sequence [0, 1, 2, 3 ... 255].
c.xor_(i, i);
@@ -1615,8 +1615,8 @@ struct X86Test_AllocMemcpy : public X86Test {
X86GpVar src = c.newIntPtr("src");
X86GpVar cnt = c.newUIntPtr("cnt");
Label L_Loop(c); // Create base labels we use
Label L_Exit(c); // in our function.
Label L_Loop = c.newLabel(); // Create base labels we use
Label L_Exit = c.newLabel(); // in our function.
c.addFunc(FuncBuilder3<Void, uint32_t*, const uint32_t*, size_t>(kCallConvHost));
c.setArg(0, dst);
@@ -2247,8 +2247,8 @@ struct X86Test_CallConditional : public X86Test {
c.setArg(1, y);
c.setArg(2, op);
Label opAdd(c);
Label opMul(c);
Label opAdd = c.newLabel();
Label opMul = c.newLabel();
c.cmp(op, 0);
c.jz(opAdd);
@@ -2392,7 +2392,7 @@ struct X86Test_CallRecursive : public X86Test {
virtual void compile(X86Compiler& c) {
X86GpVar val = c.newInt32("val");
Label skip(c);
Label skip = c.newLabel();
X86FuncNode* func = c.addFunc(FuncBuilder1<int, int>(kCallConvHost));
c.setArg(0, val);
@@ -2443,7 +2443,7 @@ struct X86Test_CallMisc1 : public X86Test {
virtual void compile(X86Compiler& c) {
X86GpVar val = c.newInt32("val");
Label skip(c);
Label skip = c.newLabel();
X86FuncNode* func = c.addFunc(FuncBuilder2<int, int, int>(kCallConvHost));
@@ -2690,11 +2690,11 @@ struct X86Test_MiscMultiRet : public X86Test {
X86GpVar a = c.newInt32("a");
X86GpVar b = c.newInt32("b");
Label L_Zero(c);
Label L_Add(c);
Label L_Sub(c);
Label L_Mul(c);
Label L_Div(c);
Label L_Zero = c.newLabel();
Label L_Add = c.newLabel();
Label L_Sub = c.newLabel();
Label L_Mul = c.newLabel();
Label L_Div = c.newLabel();
c.setArg(0, op);
c.setArg(1, a);
@@ -2788,7 +2788,7 @@ struct X86Test_MiscUnfollow : public X86Test {
X86GpVar a = c.newInt32("a");
X86GpVar b = c.newIntPtr("b");
Label tramp(c);
Label tramp = c.newLabel();
c.setArg(0, a);
c.setArg(1, b);
@@ -2939,10 +2939,10 @@ int X86TestSuite::run() {
FILE* file = stdout;
FileLogger fileLogger(file);
fileLogger.setOption(kLoggerOptionBinaryForm, true);
fileLogger.addOptions(Logger::kOptionBinaryForm);
StringLogger stringLogger;
stringLogger.setOption(kLoggerOptionBinaryForm, true);
stringLogger.addOptions(Logger::kOptionBinaryForm);
for (i = 0; i < count; i++) {
JitRuntime runtime;

View File

@@ -13,8 +13,6 @@
namespace asmgen {
enum { kGenBlendInstCount = 65 };
// Generate a typical alpha blend function using SSE2 instruction set. Used
// for benchmarking and also in test86. The generated code should be stable
// and fully functional.
@@ -29,25 +27,25 @@ static void blend(asmjit::X86Compiler& c) {
X86GpVar j = c.newIntPtr("j");
X86GpVar t = c.newIntPtr("t");
X86XmmVar cZero = c.newXmm("cZero");
X86XmmVar cMul255A = c.newXmm("cMul255A");
X86XmmVar cMul255M = c.newXmm("cMul255M");
X86XmmVar x0 = c.newXmm("x0");
X86XmmVar x1 = c.newXmm("x1");
X86XmmVar y0 = c.newXmm("y0");
X86XmmVar a0 = c.newXmm("a0");
X86XmmVar a1 = c.newXmm("a1");
Label L_SmallLoop(c);
Label L_SmallEnd(c);
X86XmmVar cZero = c.newXmm("cZero");
X86XmmVar cMul255A = c.newXmm("cMul255A");
X86XmmVar cMul255M = c.newXmm("cMul255M");
Label L_LargeLoop(c);
Label L_LargeEnd(c);
Label L_SmallLoop = c.newLabel();
Label L_SmallEnd = c.newLabel();
Label L_Data(c);
Label L_LargeLoop = c.newLabel();
Label L_LargeEnd = c.newLabel();
c.addFunc(FuncBuilder3<Void, void*, const void*, size_t>(kCallConvHost));
Label L_Data = c.newLabel();
c.addFunc(FuncBuilder3<Void, void*, const void*, size_t>(c.getRuntime()->getCdeclConv()));
c.setArg(0, dst);
c.setArg(1, src);
@@ -170,8 +168,8 @@ static void blend(asmjit::X86Compiler& c) {
// Data.
c.align(kAlignData, 16);
c.bind(L_Data);
c.dxmm(Vec128::fromSw(0x0080));
c.dxmm(Vec128::fromSw(0x0101));
c.dxmm(Vec128::fromSW(0x0080));
c.dxmm(Vec128::fromSW(0x0101));
}
} // asmgen namespace

View File

@@ -5,5 +5,5 @@ ASMJIT_BUILD_DIR="build_makefiles_dbg"
mkdir ../${ASMJIT_BUILD_DIR}
cd ../${ASMJIT_BUILD_DIR}
cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1
cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug -DASMJIT_BUILD_TEST=1
cd ${ASMJIT_CURRENT_DIR}