From 185a96a46a923c937f5d8ac3a6adaa1b183f06f2 Mon Sep 17 00:00:00 2001 From: kobalicek Date: Mon, 21 Mar 2016 20:04:13 +0100 Subject: [PATCH] Reworked CpuInfo (removed X86CpuInfo, added ARM CpuInfo support). Renamed CodeGen to ExternalTool. Moved logger constants from asmjit namespace to asmjit::Logger. Moved AssemblerFeature constants from asmjit namespace to asmjit::Assembler. Added noexcept to most APIs that are not intended to throw. Added memory utilities that can read/write to unaligned memory location (ongoing ARM support). Removed unimplemented instruction scheduler, will be added back when it's working. --- BREAKING.md | 20 + CMakeLists.txt | 57 +- README.md | 22 +- src/asmjit/arm.h | 20 + src/asmjit/asmjit.h | 52 +- src/asmjit/base.h | 1 + src/asmjit/base/assembler.cpp | 95 +- src/asmjit/base/assembler.h | 586 +++++++------ src/asmjit/base/compiler.cpp | 96 +-- src/asmjit/base/compiler.h | 228 +++-- src/asmjit/base/compilercontext.cpp | 18 +- src/asmjit/base/compilercontext_p.h | 23 +- src/asmjit/base/compilerfunc.h | 68 +- src/asmjit/base/constpool.cpp | 28 +- src/asmjit/base/constpool.h | 36 +- src/asmjit/base/containers.cpp | 124 +-- src/asmjit/base/containers.h | 343 ++------ src/asmjit/base/cpuinfo.cpp | 615 ++++++++++++- src/asmjit/base/cpuinfo.h | 350 +++++++- src/asmjit/base/globals.cpp | 14 +- src/asmjit/base/globals.h | 30 +- src/asmjit/base/hlstream.h | 540 ++++++------ src/asmjit/base/logger.cpp | 43 +- src/asmjit/base/logger.h | 129 +-- src/asmjit/base/operand.h | 470 +++++----- src/asmjit/base/podvector.cpp | 132 +++ src/asmjit/base/podvector.h | 278 ++++++ src/asmjit/base/runtime.cpp | 129 ++- src/asmjit/base/runtime.h | 121 +-- src/asmjit/base/utils.cpp | 10 +- src/asmjit/base/utils.h | 762 +++++++++++++---- src/asmjit/base/vectypes.h | 1094 ++++++++++-------------- src/asmjit/base/vmem.cpp | 85 +- src/asmjit/base/vmem.h | 36 +- src/asmjit/base/zone.cpp | 16 +- src/asmjit/base/zone.h | 28 +- src/asmjit/build.h | 38 +- src/asmjit/host.h | 3 
- src/asmjit/x86.h | 1 - src/asmjit/x86/x86assembler.cpp | 216 +++-- src/asmjit/x86/x86assembler.h | 244 +++--- src/asmjit/x86/x86compiler.cpp | 178 ++-- src/asmjit/x86/x86compiler.h | 502 +++++------ src/asmjit/x86/x86compilercontext.cpp | 298 ++----- src/asmjit/x86/x86compilercontext_p.h | 6 - src/asmjit/x86/x86cpuinfo.cpp | 401 --------- src/asmjit/x86/x86cpuinfo.h | 273 ------ src/asmjit/x86/x86inst.cpp | 1141 ++++++++++++------------- src/asmjit/x86/x86inst.h | 42 +- src/asmjit/x86/x86operand.cpp | 18 +- src/asmjit/x86/x86operand.h | 1085 ++++++++++++----------- src/asmjit/x86/x86operand_regs.cpp | 264 +++--- src/asmjit/x86/x86scheduler.cpp | 94 -- src/asmjit/x86/x86scheduler_p.h | 63 -- src/test/asmjit_bench_x86.cpp | 110 ++- src/test/asmjit_test_opcode.cpp | 11 +- src/test/asmjit_test_opcode.h | 2 - src/test/asmjit_test_unit.cpp | 206 +++-- src/test/asmjit_test_x86.cpp | 88 +- src/test/genblend.h | 26 +- tools/configure-unix-makefiles-dbg.sh | 2 +- 61 files changed, 6281 insertions(+), 5730 deletions(-) create mode 100644 src/asmjit/arm.h create mode 100644 src/asmjit/base/podvector.cpp create mode 100644 src/asmjit/base/podvector.h delete mode 100644 src/asmjit/x86/x86cpuinfo.cpp delete mode 100644 src/asmjit/x86/x86cpuinfo.h delete mode 100644 src/asmjit/x86/x86scheduler.cpp delete mode 100644 src/asmjit/x86/x86scheduler_p.h diff --git a/BREAKING.md b/BREAKING.md index fa0b953..03e1ec5 100644 --- a/BREAKING.md +++ b/BREAKING.md @@ -1,3 +1,23 @@ +2016-03-21 + +CpuInfo has been completely redesigned. It now supports multiple CPUs without having to inherit it to support a specific architecture. Also all CpuInfo-related constants have been moved to CpuInfo. 
+ +Change: + +``` +const X86CpuInfo* cpu = X86CpuInfo::getHost(); +cpu->hasFeature(kX86CpuFeatureSSE4_1); +``` + +to + +``` +const CpuInfo& cpu = CpuInfo::getHost(); +cpu.hasFeature(CpuInfo::kX86FeatureSSE4_1); +``` + +The whole code-base now uses `noexcept` keyword to inform API users that these functions won't throw an exception. Moreover, the possibility to throw exception through `ErrorHandler` has been removed as it seems that nobody has ever used it. `Assembler::emit()` and friends are still not marked as `noexcept` in case this decision is taken back. If there is no complaint even `emit()` functions will be marked `noexcept` in the near future. + 2015-12-07 ---------- diff --git a/CMakeLists.txt b/CMakeLists.txt index 467bb24..ec42b9c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -10,6 +10,18 @@ cmake_minimum_required(VERSION 3.1) # Whether to build a static library (default FALSE). # set(ASMJIT_STATIC FALSE) +# Whether to build ARM32 backend (TRUE if building for ARM32). +# set(ASMJIT_BUILD_ARM32 FALSE) + +# Whether to build ARM64 backend (TRUE if building for ARM64). +# set(ASMJIT_BUILD_ARM64 FALSE) + +# Whether to build X86 backend (TRUE if building for X86). +# set(ASMJIT_BUILD_X86 FALSE) + +# Whether to build X64 backend (TRUE if building for X64). +# set(ASMJIT_BUILD_X64 FALSE) + # Whether to build tests and samples (default FALSE). # set(ASMJIT_BUILD_TEST FALSE) @@ -55,6 +67,7 @@ message("-- [asmjit] ASMJIT_DIR=${ASMJIT_DIR}") set(ASMJIT_SOURCE_DIR "${ASMJIT_DIR}/src") # Asmjit source directory. set(ASMJIT_INCLUDE_DIR "${ASMJIT_SOURCE_DIR}") # Asmjit include directory. +set(ASMJIT_CFLAGS) # Asmjit CFLAGS / CXXFLAGS. set(ASMJIT_DEPS) # Asmjit dependencies (list of libraries) for the linker. set(ASMJIT_LIBS) # Asmjit dependencies with asmjit included, for consumers. 
@@ -104,11 +117,28 @@ if(NOT ASMJIT_EMBED) list(INSERT ASMJIT_LIBS 0 asmjit) endif() -set(ASMJIT_PRIVATE_CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG}) -set(ASMJIT_PRIVATE_CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL}) +if(ASMJIT_BUILD_ARM32) + List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_ARM32") +endif() + +if(ASMJIT_BUILD_ARM64) + List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_ARM64") +endif() + +if(ASMJIT_BUILD_X86) + List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_X86") +endif() + +if(ASMJIT_BUILD_X64) + List(APPEND ASMJIT_CFLAGS "${ASMJIT_D}ASMJIT_BUILD_X64") +endif() + +set(ASMJIT_PRIVATE_CFLAGS_DBG ${ASMJIT_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_DBG}) +set(ASMJIT_PRIVATE_CFLAGS_REL ${ASMJIT_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_PRIVATE_CFLAGS_REL}) message("-- [asmjit] ASMJIT_DEPS=${ASMJIT_DEPS}") message("-- [asmjit] ASMJIT_LIBS=${ASMJIT_LIBS}") +message("-- [asmjit] ASMJIT_CFLAGS=${ASMJIT_CFLAGS}") # ============================================================================= # [AsmJit - Macros] @@ -168,6 +198,8 @@ asmjit_add_source(ASMJIT_SRC asmjit base.h build.h host.h + + arm.h x86.h ) @@ -193,6 +225,8 @@ asmjit_add_source(ASMJIT_SRC asmjit/base logger.h operand.cpp operand.h + podvector.cpp + podvector.h runtime.cpp runtime.h utils.cpp @@ -204,6 +238,18 @@ asmjit_add_source(ASMJIT_SRC asmjit/base zone.h ) +if(0) +asmjit_add_source(ASMJIT_SRC asmjit/arm + armassembler.cpp + armassembler.h + arminst.cpp + arminst.h + armoperand.cpp + armoperand_regs.cpp + armoperand.h +) +endif() + asmjit_add_source(ASMJIT_SRC asmjit/x86 x86assembler.cpp x86assembler.h @@ -213,15 +259,11 @@ asmjit_add_source(ASMJIT_SRC asmjit/x86 x86compilercontext_p.h x86compilerfunc.cpp x86compilerfunc.h - x86cpuinfo.cpp - x86cpuinfo.h x86inst.cpp x86inst.h x86operand.cpp x86operand_regs.cpp x86operand.h - x86scheduler.cpp - x86scheduler_p.h ) # 
============================================================================= @@ -252,7 +294,7 @@ if(NOT ASMJIT_EMBED) # Add `asmjit` tests and samples. if(ASMJIT_BUILD_TEST) set(ASMJIT_TEST_SRC "") - set(ASMJIT_TEST_CFLAGS ${ASMJIT_D}ASMJIT_TEST ${ASMJIT_D}ASMJIT_EMBED) + set(ASMJIT_TEST_CFLAGS ${ASMJIT_CFLAGS} ${ASMJIT_D}ASMJIT_TEST ${ASMJIT_D}ASMJIT_EMBED) asmjit_add_source(ASMJIT_TEST_SRC test asmjit_test_unit.cpp broken.cpp broken.h) add_executable(asmjit_test_unit ${ASMJIT_SRC} ${ASMJIT_TEST_SRC}) @@ -273,6 +315,7 @@ if(NOT ASMJIT_EMBED) foreach(_target asmjit_bench_x86 asmjit_test_opcode asmjit_test_x86) add_executable(${_target} "src/test/${_target}.cpp") + target_compile_options(${_target} PRIVATE ${ASMJIT_CFLAGS}) target_link_libraries(${_target} ${ASMJIT_LIBS}) endforeach() endif() diff --git a/README.md b/README.md index a0c6673..3f7b72b 100644 --- a/README.md +++ b/README.md @@ -37,15 +37,16 @@ Supported Environments ### C++ Compilers - * BorlandC++ (not tested regularly) - * CLang (tested by Travis-CI) + * Clang (tested by Travis-CI) + * CodeGear (including BorlandC++, not tested regularly) * GCC (tested by Travis-CI) * MinGW (tested manually) - * MSVC (tested manually, at least Visual Studio 2003 required) + * MSVC (tested manually, at least VS2003 is required) * Other compilers require some testing and support in `asmjit/build.h` ### Backends + * ARM (work-in-progress) * X86 (tested by Travis-CI) * X64 (tested by Travis-CI) @@ -54,9 +55,10 @@ Project Organization * `/` - Project root * `src` - Source code - * `asmjit` - Public header files (always include from here) - * `base` - Base files, used by AsmJit and all backends - * `x86` - X86/X64 specific files, used only by X86/X64 backend + * `asmjit` - Source code and headers (always point include path in here) + * `base` - Generic API and interfaces, used by all backends + * `arm` - ARM/ARM64 specific API, used only by ARM and ARM64 backends + * `x86` - X86/X64 specific API, used only by X86 and X64 
backends * `test` - Unit and integration tests (don't embed in your project) * `tools` - Tools used for configuring, documenting and generating files @@ -93,9 +95,11 @@ AsmJit is designed to be easy embeddable in any kind project. However, it has so ### Architectures - * `ASMJIT_BUILD_X86` - Always build x86 backend regardless of host architecture. - * `ASMJIT_BUILD_X64` - Always build x64 backend regardless of host architecture. - * `ASMJIT_BUILD_HOST` - Always build host backend, if only `ASMJIT_BUILD_HOST` is used only the host architecture detected at compile-time will be included. + * `ASMJIT_BUILD_ARM` - Build ARM backend. + * `ASMJIT_BUILD_ARM64` - Build ARM64 backend. + * `ASMJIT_BUILD_X86` - Build x86 backend. + * `ASMJIT_BUILD_X64` - Build x64 backend. + * `ASMJIT_BUILD_HOST` - Build host backend, if only `ASMJIT_BUILD_HOST` is used only the host architecture detected at compile-time will be included. * By default only `ASMJIT_BUILD_HOST` is defined. diff --git a/src/asmjit/arm.h b/src/asmjit/arm.h new file mode 100644 index 0000000..afa2a0d --- /dev/null +++ b/src/asmjit/arm.h @@ -0,0 +1,20 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Guard] +#ifndef _ASMJIT_ARM_H +#define _ASMJIT_ARM_H + +// [Dependencies - AsmJit] +#include "./base.h" + +#include "./arm/armassembler.h" +#include "./arm/armcompiler.h" +#include "./arm/arminst.h" +#include "./arm/armoperand.h" + +// [Guard] +#endif // _ASMJIT_ARM_H diff --git a/src/asmjit/asmjit.h b/src/asmjit/asmjit.h index 7cdc7bd..600dfa2 100644 --- a/src/asmjit/asmjit.h +++ b/src/asmjit/asmjit.h @@ -81,7 +81,7 @@ //! //! List of the most useful code-generation and operand classes: //! - \ref asmjit::Assembler - Low-level code-generation. -//! - \ref asmjit::CodeGen - Astract code-generation that serializes to `Assembler`: +//! - \ref asmjit::ExternalTool - An external tool that can serialize to `Assembler`: //! 
- \ref asmjit::Compiler - High-level code-generation. //! - \ref asmjit::Runtime - Describes where the code is stored and how it's executed: //! - \ref asmjit::HostRuntime - Runtime that runs on the host machine: @@ -307,69 +307,47 @@ //! the host X86/X64 processor. AsmJit contains utilities that can get the most //! important information related to the features supported by the CPU and the //! host operating system, in addition to host processor name and number of -//! cores. Class `X86CpuInfo` extends `CpuInfo` and provides functionality -//! specific to X86 and X64. +//! cores. Class `CpuInfo` provides generic information about a host or target +//! processor and contains also a specific X86/X64 information. //! //! By default AsmJit queries the CPU information after the library is loaded //! and the queried information is reused by all instances of `JitRuntime`. -//! The global instance of `X86CpuInfo` can't be changed, because it will affect +//! The global instance of `CpuInfo` can't be changed, because it will affect //! the code generation of all `Runtime`s. If there is a need to have a //! specific CPU information which contains modified features or processor -//! vendor it's possible by creating a new instance of `X86CpuInfo` and setting -//! up its members. `X86CpuUtil::detect` can be used to detect CPU features into -//! an existing `X86CpuInfo` instance - it may become handly if only one property -//! has to be turned on/off. -//! -//! If the high-level interface `X86CpuInfo` offers is not enough there is also -//! `X86CpuUtil::callCpuId` helper that can be used to call CPUID instruction -//! with a given parameters and to consume the output. +//! vendor it's possible by creating a new instance of the `CpuInfo` and setting +//! up its members. //! //! Cpu detection is important when generating a JIT code that may or may not //! use certain CPU features. For example there used to be a SSE/SSE2 detection //! 
in the past and today there is often AVX/AVX2 detection. //! -//! The example below shows how to detect SSE4.1: +//! The example below shows how to detect a SSE4.1 instruction set: //! //! ~~~ //! using namespace asmjit; //! -//! // Get `X86CpuInfo` global instance. -//! const X86CpuInfo* cpuInfo = X86CpuInfo::getHost(); +//! const CpuInfo& cpuInfo = CpuInfo::getHost(); //! -//! if (cpuInfo->hasFeature(kX86CpuFeatureSSE4_1)) { +//! if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE4_1)) { //! // Processor has SSE4.1. //! } -//! else if (cpuInfo->hasFeature(kX86CpuFeatureSSE2)) { +//! else if (cpuInfo.hasFeature(CpuInfo::kX86FeatureSSE2)) { //! // Processor doesn't have SSE4.1, but has SSE2. //! } //! else { //! // Processor is archaic; it's a wonder AsmJit works here! //! } //! ~~~ -//! -//! The next example shows how to call `CPUID` directly: -//! -//! ~~~ -//! using namespace asmjit; -//! -//! // Call CPUID, first two arguments are passed in EAX/ECX. -//! X86CpuId out; -//! X86CpuUtil::callCpuId(0, 0, &out); -//! -//! // If EAX argument is 0, EBX, ECX and EDX registers are filled with a CPU vendor. -//! char cpuVendor[13]; -//! ::memcpy(cpuVendor, &out.ebx, 4); -//! ::memcpy(cpuVendor + 4, &out.edx, 4); -//! ::memcpy(cpuVendor + 8, &out.ecx, 4); -//! vendor[12] = '\0'; -//! -//! // Print the CPU vendor retrieved from CPUID. -//! ::printf("CPU Vendor: %s\n", cpuVendor); -//! 
~~~ // [Dependencies - Base] #include "./base.h" +// [Dependencies - ARM/ARM64] +#if defined(ASMJIT_BUILD_ARM32) || defined(ASMJIT_BUILD_ARM64) +#include "./arm.h" +#endif // ASMJIT_BUILD_ARM32 || ASMJIT_BUILD_ARM64 + // [Dependencies - X86/X64] #if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64) #include "./x86.h" diff --git a/src/asmjit/base.h b/src/asmjit/base.h index 315899d..522b946 100644 --- a/src/asmjit/base.h +++ b/src/asmjit/base.h @@ -18,6 +18,7 @@ #include "./base/globals.h" #include "./base/logger.h" #include "./base/operand.h" +#include "./base/podvector.h" #include "./base/runtime.h" #include "./base/utils.h" #include "./base/vectypes.h" diff --git a/src/asmjit/base/assembler.cpp b/src/asmjit/base/assembler.cpp index c6d41da..fd0a600 100644 --- a/src/asmjit/base/assembler.cpp +++ b/src/asmjit/base/assembler.cpp @@ -24,29 +24,29 @@ namespace asmjit { // [asmjit::ErrorHandler] // ============================================================================ -ErrorHandler::ErrorHandler() {} -ErrorHandler::~ErrorHandler() {} +ErrorHandler::ErrorHandler() noexcept {} +ErrorHandler::~ErrorHandler() noexcept {} -ErrorHandler* ErrorHandler::addRef() const { +ErrorHandler* ErrorHandler::addRef() const noexcept { return const_cast(this); } -void ErrorHandler::release() {} +void ErrorHandler::release() noexcept {} // ============================================================================ -// [asmjit::CodeGen] +// [asmjit::ExternalTool] // ============================================================================ -CodeGen::CodeGen() +ExternalTool::ExternalTool() noexcept : _assembler(nullptr), - _hlId(0), + _exId(0), _arch(kArchNone), _regSize(0), _finalized(false), _reserved(0), _lastError(kErrorNotInitialized) {} -CodeGen::~CodeGen() {} +ExternalTool::~ExternalTool() noexcept {} -Error CodeGen::setLastError(Error error, const char* message) { +Error ExternalTool::setLastError(Error error, const char* message) noexcept { // Special case, reset the 
last error the error is `kErrorOk`. if (error == kErrorOk) { _lastError = kErrorOk; @@ -63,7 +63,7 @@ Error CodeGen::setLastError(Error error, const char* message) { // Logging is skipped if the error is handled by `ErrorHandler. ErrorHandler* eh = assembler->getErrorHandler(); - ASMJIT_TLOG("[ERROR (CodeGen)] %s (0x%0.8u) %s\n", message, + ASMJIT_TLOG("[ERROR (ExternalTool)] %s (0x%0.8u) %s\n", message, static_cast(error), !eh ? "(Possibly unhandled?)" : ""); @@ -73,8 +73,8 @@ Error CodeGen::setLastError(Error error, const char* message) { #if !defined(ASMJIT_DISABLE_LOGGER) Logger* logger = assembler->getLogger(); if (logger != nullptr) - logger->logFormat(kLoggerStyleComment, - "*** ERROR (CodeGen): %s (0x%0.8u).\n", message, + logger->logFormat(Logger::kStyleComment, + "*** ERROR (ExternalTool): %s (0x%0.8u).\n", message, static_cast(error)); #endif // !ASMJIT_DISABLE_LOGGER @@ -89,18 +89,18 @@ Error CodeGen::setLastError(Error error, const char* message) { // [asmjit::Assembler - Construction / Destruction] // ============================================================================ -Assembler::Assembler(Runtime* runtime) +Assembler::Assembler(Runtime* runtime) noexcept : _runtime(runtime), _logger(nullptr), _errorHandler(nullptr), _arch(kArchNone), _regSize(0), _reserved(0), - _features(Utils::mask(kAssemblerFeatureOptimizedAlign)), + _asmOptions(0), _instOptions(0), _lastError(runtime ? 
kErrorOk : kErrorNotInitialized), - _hlIdGenerator(0), - _hlAttachedCount(0), + _exIdGenerator(0), + _exCountAttached(0), _zoneAllocator(8192 - Zone::kZoneOverhead), _buffer(nullptr), _end(nullptr), @@ -108,10 +108,10 @@ Assembler::Assembler(Runtime* runtime) _trampolinesSize(0), _comment(nullptr), _unusedLinks(nullptr), - _labelList(), - _relocList() {} + _labels(), + _relocations() {} -Assembler::~Assembler() { +Assembler::~Assembler() noexcept { reset(true); if (_errorHandler != nullptr) @@ -122,12 +122,12 @@ Assembler::~Assembler() { // [asmjit::Assembler - Reset] // ============================================================================ -void Assembler::reset(bool releaseMemory) { - _features = Utils::mask(kAssemblerFeatureOptimizedAlign); +void Assembler::reset(bool releaseMemory) noexcept { + _asmOptions = 0; _instOptions = 0; _lastError = kErrorOk; - _hlIdGenerator = 0; - _hlAttachedCount = 0; + _exIdGenerator = 0; + _exCountAttached = 0; _zoneAllocator.reset(releaseMemory); @@ -143,15 +143,16 @@ void Assembler::reset(bool releaseMemory) { _comment = nullptr; _unusedLinks = nullptr; - _labelList.reset(releaseMemory); - _relocList.reset(releaseMemory); + _sections.reset(releaseMemory); + _labels.reset(releaseMemory); + _relocations.reset(releaseMemory); } // ============================================================================ // [asmjit::Assembler - Logging & Error Handling] // ============================================================================ -Error Assembler::setLastError(Error error, const char* message) { +Error Assembler::setLastError(Error error, const char* message) noexcept { // Special case, reset the last error the error is `kErrorOk`. 
if (error == kErrorOk) { _lastError = kErrorOk; @@ -173,7 +174,7 @@ Error Assembler::setLastError(Error error, const char* message) { #if !defined(ASMJIT_DISABLE_LOGGER) Logger* logger = _logger; if (logger != nullptr) - logger->logFormat(kLoggerStyleComment, + logger->logFormat(Logger::kStyleComment, "*** ERROR (Assembler): %s (0x%0.8u).\n", message, static_cast(error)); #endif // !ASMJIT_DISABLE_LOGGER @@ -185,7 +186,7 @@ Error Assembler::setLastError(Error error, const char* message) { return error; } -Error Assembler::setErrorHandler(ErrorHandler* handler) { +Error Assembler::setErrorHandler(ErrorHandler* handler) noexcept { ErrorHandler* oldHandler = _errorHandler; if (oldHandler != nullptr) @@ -202,7 +203,7 @@ Error Assembler::setErrorHandler(ErrorHandler* handler) { // [asmjit::Assembler - Buffer] // ============================================================================ -Error Assembler::_grow(size_t n) { +Error Assembler::_grow(size_t n) noexcept { size_t capacity = getCapacity(); size_t after = getOffset() + n; @@ -237,7 +238,7 @@ Error Assembler::_grow(size_t n) { return _reserve(capacity); } -Error Assembler::_reserve(size_t n) { +Error Assembler::_reserve(size_t n) noexcept { size_t capacity = getCapacity(); if (n <= capacity) return kErrorOk; @@ -264,16 +265,16 @@ Error Assembler::_reserve(size_t n) { // [asmjit::Assembler - Label] // ============================================================================ -Error Assembler::_newLabelId() { +Error Assembler::_newLabelId() noexcept { LabelData* data = _zoneAllocator.allocT(); data->offset = -1; data->links = nullptr; - data->hlId = 0; - data->hlData = nullptr; + data->exId = 0; + data->exData = nullptr; - uint32_t id = OperandUtil::makeLabelId(static_cast(_labelList.getLength())); - Error error = _labelList.append(data); + uint32_t id = OperandUtil::makeLabelId(static_cast(_labels.getLength())); + Error error = _labels.append(data); if (error != kErrorOk) { setLastError(kErrorNoHeapMemory); @@ 
-283,7 +284,7 @@ Error Assembler::_newLabelId() { return id; } -LabelLink* Assembler::_newLabelLink() { +LabelLink* Assembler::_newLabelLink() noexcept { LabelLink* link = _unusedLinks; if (link) { @@ -303,7 +304,7 @@ LabelLink* Assembler::_newLabelLink() { return link; } -Error Assembler::bind(const Label& label) { +Error Assembler::bind(const Label& label) noexcept { // Get label data based on label id. uint32_t index = label.getId(); LabelData* data = getLabelData(index); @@ -318,11 +319,11 @@ Error Assembler::bind(const Label& label) { sb.setFormat("L%u:", index); size_t binSize = 0; - if ((_logger->getOptions() & (1 << kLoggerOptionBinaryForm)) == 0) + if (!_logger->hasOption(Logger::kOptionBinaryForm)) binSize = kInvalidIndex; LogUtil::formatLine(sb, nullptr, binSize, 0, 0, _comment); - _logger->logString(kLoggerStyleLabel, sb.getData(), sb.getLength()); + _logger->logString(Logger::kStyleLabel, sb.getData(), sb.getLength()); } #endif // !ASMJIT_DISABLE_LOGGER @@ -338,7 +339,7 @@ Error Assembler::bind(const Label& label) { if (link->relocId != -1) { // Handle RelocData - We have to update RelocData information instead of // patching the displacement in LabelData. - _relocList[link->relocId].data += static_cast(pos); + _relocations[link->relocId].data += static_cast(pos); } else { // Not using relocId, this means that we are overwriting a real @@ -347,16 +348,16 @@ Error Assembler::bind(const Label& label) { static_cast(pos) - offset + link->displacement); // Size of the value we are going to patch. Only BYTE/DWORD is allowed. 
- uint32_t size = getByteAt(offset); + uint32_t size = readU8At(offset); ASMJIT_ASSERT(size == 1 || size == 4); if (size == 4) { - setInt32At(offset, patchedValue); + writeI32At(offset, patchedValue); } else { ASMJIT_ASSERT(size == 1); if (Utils::isInt8(patchedValue)) - setByteAt(offset, static_cast(patchedValue & 0xFF)); + writeU8At(offset, static_cast(patchedValue) & 0xFF); else error = kErrorIllegalDisplacement; } @@ -391,7 +392,7 @@ Error Assembler::bind(const Label& label) { // [asmjit::Assembler - Embed] // ============================================================================ -Error Assembler::embed(const void* data, uint32_t size) { +Error Assembler::embed(const void* data, uint32_t size) noexcept { if (getRemainingSpace() < size) { Error error = _grow(size); if (error != kErrorOk) @@ -404,7 +405,7 @@ Error Assembler::embed(const void* data, uint32_t size) { #if !defined(ASMJIT_DISABLE_LOGGER) if (_logger) - _logger->logBinary(kLoggerStyleData, data, size); + _logger->logBinary(Logger::kStyleData, data, size); #endif // !ASMJIT_DISABLE_LOGGER return kErrorOk; @@ -414,7 +415,7 @@ Error Assembler::embed(const void* data, uint32_t size) { // [asmjit::Assembler - Reloc] // ============================================================================ -size_t Assembler::relocCode(void* dst, Ptr baseAddress) const { +size_t Assembler::relocCode(void* dst, Ptr baseAddress) const noexcept { if (baseAddress == kNoBaseAddress) baseAddress = static_cast((uintptr_t)dst); return _relocCode(dst, baseAddress); @@ -424,7 +425,7 @@ size_t Assembler::relocCode(void* dst, Ptr baseAddress) const { // [asmjit::Assembler - Make] // ============================================================================ -void* Assembler::make() { +void* Assembler::make() noexcept { // Do nothing on error condition or if no instruction has been emitted. 
if (_lastError != kErrorOk || getCodeSize() == 0) return nullptr; diff --git a/src/asmjit/base/assembler.h b/src/asmjit/base/assembler.h index 480c9f5..15c472e 100644 --- a/src/asmjit/base/assembler.h +++ b/src/asmjit/base/assembler.h @@ -12,6 +12,7 @@ #include "../base/containers.h" #include "../base/logger.h" #include "../base/operand.h" +#include "../base/podvector.h" #include "../base/runtime.h" #include "../base/zone.h" @@ -23,48 +24,6 @@ namespace asmjit { //! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::AssemblerFeatures] -// ============================================================================ - -//! Features of \ref Assembler. -ASMJIT_ENUM(AssemblerFeatures) { - //! Emit optimized code-alignment sequences (`Assembler` and `Compiler`). - //! - //! Default `true`. - //! - //! X86/X64 - //! ------- - //! - //! Default align sequence used by X86/X64 architecture is one-byte 0x90 - //! opcode that is mostly shown by disassemblers as nop. However there are - //! more optimized align sequences for 2-11 bytes that may execute faster. - //! If this feature is enabled asmjit will generate specialized sequences - //! for alignment between 1 to 11 bytes. Also when `X86Compiler` is used, - //! it can add REX prefixes into the code to make some instructions greater - //! so no alignment sequence is needed. - kAssemblerFeatureOptimizedAlign = 0, - - //! Emit jump-prediction hints (`Assembler` and `Compiler`). - //! - //! Default `false`. - //! - //! X86/X64 - //! ------- - //! - //! Jump prediction is usually based on the direction of the jump. If the - //! jump is backward it is usually predicted as taken; and if the jump is - //! forward it is usually predicted as not-taken. The reason is that loops - //! generally use backward jumps and conditions usually use forward jumps. - //! However this behavior can be overridden by using instruction prefixes. - //! 
If this option is enabled these hints will be emitted. - //! - //! This feature is disabled by default, because the only processor that - //! used to take into consideration prediction hints was P4. Newer processors - //! implement heuristics for branch prediction that ignores any static hints. - kAssemblerFeaturePredictedJumps = 1 -}; - // ============================================================================ // [asmjit::InstId] // ============================================================================ @@ -230,13 +189,12 @@ struct LabelData { //! Label links chain. LabelLink* links; - //! An ID of a code-generator that created this label. - uint64_t hlId; - //! Pointer to the data the code-generator associated with the label. - void* hlData; + //! External tool ID, if linked to any. + uint64_t exId; + //! Pointer to a data that `ExternalTool` associated with the label. + void* exData; }; - // ============================================================================ // [asmjit::RelocData] // ============================================================================ @@ -271,7 +229,7 @@ struct RelocData { //! Error handler. //! -//! Error handler can be used to override the default behavior of `CodeGen` +//! Error handler can be used to override the default behavior of `Assembler` //! error handling and propagation. See `handleError()` on how to override it. //! //! Please note that `addRef` and `release` functions are used, but there is @@ -283,9 +241,9 @@ struct ASMJIT_VIRTAPI ErrorHandler { // -------------------------------------------------------------------------- //! Create a new `ErrorHandler` instance. - ASMJIT_API ErrorHandler(); + ASMJIT_API ErrorHandler() noexcept; //! Destroy the `ErrorHandler` instance. 
- ASMJIT_API virtual ~ErrorHandler(); + ASMJIT_API virtual ~ErrorHandler() noexcept; // -------------------------------------------------------------------------- // [AddRef / Release] @@ -298,13 +256,13 @@ struct ASMJIT_VIRTAPI ErrorHandler { //! multiple `ErrorHandler` instances are used by a different code generators //! you may provide your own functionality for reference counting. In that //! case `addRef()` and `release()` functions should be overridden. - ASMJIT_API virtual ErrorHandler* addRef() const; + ASMJIT_API virtual ErrorHandler* addRef() const noexcept; //! Release this error handler. //! //! \note This member function is provided for convenience. See `addRef()` //! for more detailed information related to reference counting. - ASMJIT_API virtual void release(); + ASMJIT_API virtual void release() noexcept; // -------------------------------------------------------------------------- // [Handle Error] @@ -312,50 +270,40 @@ struct ASMJIT_VIRTAPI ErrorHandler { //! Error handler (pure). //! - //! Error handler is called when an error happened. An error can happen in - //! many places, but error handler is mostly used by `Assembler` and - //! `Compiler` classes to report anything that may cause incorrect code - //! generation. There are multiple ways how the error handler can be used - //! and each has it's pros/cons. + //! Error handler is called after an error happened. An error can happen in + //! many places, but error handler is mostly used by `Assembler` to report + //! anything a fatal problem. There are multiple ways how the error handler + //! can be used: //! - //! AsmJit library doesn't use exceptions and can be compiled with or without - //! exception handling support. Even if the AsmJit library is compiled without - //! exceptions it is exception-safe and handleError() can report an incoming - //! error by throwing an exception of any type. It's guaranteed that the - //! 
exception won't be catched by AsmJit and will be propagated to the code - //! calling AsmJit `Assembler` or `Compiler` methods. Alternative to - //! throwing an exception is using `setjmp()` and `longjmp()` pair available - //! in the standard C library. + //! 1. Returning `true` or `false` from `handleError()`. If `true` is + //! returned it means that error was reported and AsmJit can continue + //! with code-generation. However, `false` reports to AsmJit that the + //! error cannot be handled, in such case it stores the error in + //! `Assembler` and puts it into an error state. The error is accessible + //! through `Assembler::getLastError(). Returning `false` is default when + //! no error handler is used. //! - //! If the exception or setjmp() / longjmp() mechanism is used, the state of - //! the `BaseAssember` or `Compiler` is unchanged and if it's possible the - //! execution (instruction serialization) can continue. However if the error - //! happened during any phase that translates or modifies the stored code - //! (for example relocation done by `Assembler` or analysis/translation - //! done by `Compiler`) the execution can't continue and the error will - //! be also stored in `Assembler` or `Compiler`. - //! - //! Finally, if no exceptions nor setjmp() / longjmp() mechanisms were used, - //! you can still implement a compatible handling by returning from your - //! error handler. Returning `true` means that error was reported and AsmJit - //! should continue execution, but `false` sets the error immediately to the - //! `Assembler` or `Compiler` and execution shouldn't continue (this is the - //! default behavior in case no error handler is used). - virtual bool handleError(Error code, const char* message, void* origin) = 0; + //! 2. AsmJit doesn't use exception handling so your error should also not + //! throw an exception, however, it's possible to use plain old C's + //! `setjmp()` and `longjmp()`. Asmjit always puts `Assembler` and + //! 
`Compiler` to a consistent state before calling the `handleError()`, + //! so you can use `longjmp()` to leave the code-generation if an error + //! happened. + virtual bool handleError(Error code, const char* message, void* origin) noexcept = 0; }; // ============================================================================ -// [asmjit::CodeGen] +// [asmjit::ExternalTool] // ============================================================================ -//! Interface to implement an external code generator (i.e. `Compiler`). -struct ASMJIT_VIRTAPI CodeGen { +//! An external tool (i.e. `Stream` or `Compiler`) that can serialize to `Assembler` +struct ASMJIT_VIRTAPI ExternalTool { // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_API CodeGen(); - ASMJIT_API virtual ~CodeGen(); + ASMJIT_API ExternalTool() noexcept; + ASMJIT_API virtual ~ExternalTool() noexcept; // -------------------------------------------------------------------------- // [Attach / Reset] @@ -364,10 +312,10 @@ struct ASMJIT_VIRTAPI CodeGen { //! \internal //! //! Called to attach this code generator to the `assembler`. - virtual Error attach(Assembler* assembler) = 0; + virtual Error attach(Assembler* assembler) noexcept = 0; //! Reset the code-generator (also detaches if attached). - virtual void reset(bool releaseMemory) = 0; + virtual void reset(bool releaseMemory) noexcept = 0; // -------------------------------------------------------------------------- // [Finalize] @@ -377,45 +325,45 @@ struct ASMJIT_VIRTAPI CodeGen { //! //! The finalization has two passes: //! - serializes code to the attached assembler. - //! - resets the `CodeGen` (detaching from the `Assembler as well) so it can - //! be reused or destroyed. - virtual Error finalize() = 0; + //! - resets the `ExternalTool` (detaching from the `Assembler as well) so + //! 
it can be reused or destroyed. + virtual Error finalize() noexcept = 0; // -------------------------------------------------------------------------- // [Runtime / Assembler] // -------------------------------------------------------------------------- //! Get the `Runtime` instance that is associated with the code-generator. - ASMJIT_INLINE Runtime* getRuntime() const { return _runtime; } + ASMJIT_INLINE Runtime* getRuntime() const noexcept { return _runtime; } //! Get the `Assembler` instance that is associated with the code-generator. - ASMJIT_INLINE Assembler* getAssembler() const { return _assembler; } + ASMJIT_INLINE Assembler* getAssembler() const noexcept { return _assembler; } // -------------------------------------------------------------------------- // [Architecture] // -------------------------------------------------------------------------- //! Get the target architecture. - ASMJIT_INLINE uint32_t getArch() const { return _arch; } + ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; } //! Get the default register size - 4 or 8 bytes, depends on the target. - ASMJIT_INLINE uint32_t getRegSize() const { return _regSize; } + ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regSize; } // -------------------------------------------------------------------------- // [Error Handling] // -------------------------------------------------------------------------- //! Get the last error code. - ASMJIT_INLINE Error getLastError() const { return _lastError; } + ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; } //! Set the last error code and propagate it through the error handler. - ASMJIT_API Error setLastError(Error error, const char* message = nullptr); + ASMJIT_API Error setLastError(Error error, const char* message = nullptr) noexcept; //! Clear the last error code. 
- ASMJIT_INLINE void resetLastError() { _lastError = kErrorOk; } + ASMJIT_INLINE void resetLastError() noexcept { _lastError = kErrorOk; } // -------------------------------------------------------------------------- - // [CodeGen] + // [ID] // -------------------------------------------------------------------------- - //! Get the code-generator ID, provided by `Assembler` when attached to it. - ASMJIT_INLINE uint64_t getHLId() const { return _hlId; } + //! Get the tool ID, provided by `Assembler` when attached to it. + ASMJIT_INLINE uint64_t getExId() const noexcept { return _exId; } // -------------------------------------------------------------------------- // [Members] @@ -426,17 +374,17 @@ struct ASMJIT_VIRTAPI CodeGen { //! Associated assembler. Assembler* _assembler; - //! High-level ID, provided by `Assembler`. + //! `ExternalTool` ID, provided by `Assembler`. //! //! If multiple high-evel code generators are associated with a single - //! assembler the `_hlId` member can be used to distinguish between them and + //! assembler the `_exId` member can be used to distinguish between them and //! to provide a mechanism to check whether the high-level code generator is //! accessing the resource it really owns. - uint64_t _hlId; + uint64_t _exId; - //! Target architecture ID. + //! Target's architecture ID. uint8_t _arch; - //! Target architecture GP register size in bytes (4 or 8). + //! Target's architecture GP register size in bytes (4 or 8). uint8_t _regSize; //! The code generator has been finalized. uint8_t _finalized; @@ -452,21 +400,97 @@ struct ASMJIT_VIRTAPI CodeGen { //! Base assembler. //! -//! This class implements the base interface that is used by architecture +//! This class implements a base interface that is used by architecture //! specific assemblers. //! //! \sa Compiler. 
struct ASMJIT_VIRTAPI Assembler { ASMJIT_NO_COPY(Assembler) + // -------------------------------------------------------------------------- + // [Options] + // -------------------------------------------------------------------------- + + //! Assembler options. + ASMJIT_ENUM(Options) { + //! Emit optimized code-alignment sequences (`Assembler` and `Compiler`). + //! + //! Default `true`. + //! + //! X86/X64 Specific + //! ---------------- + //! + //! Default align sequence used by X86/X64 architecture is one-byte 0x90 + //! opcode that is mostly shown by disassemblers as nop. However there are + //! more optimized align sequences for 2-11 bytes that may execute faster. + //! If this feature is enabled asmjit will generate specialized sequences + //! for alignment between 1 to 11 bytes. Also when `X86Compiler` is used, + //! it can add REX prefixes into the code to make some instructions greater + //! so no alignment sequence is needed. + kOptionOptimizedAlign = 0, + + //! Emit jump-prediction hints (`Assembler` and `Compiler`). + //! + //! Default `false`. + //! + //! X86/X64 Specific + //! ---------------- + //! + //! Jump prediction is usually based on the direction of the jump. If the + //! jump is backward it is usually predicted as taken; and if the jump is + //! forward it is usually predicted as not-taken. The reason is that loops + //! generally use backward jumps and conditions usually use forward jumps. + //! However this behavior can be overridden by using instruction prefixes. + //! If this option is enabled these hints will be emitted. + //! + //! This feature is disabled by default, because the only processor that + //! used to take into consideration prediction hints was P4. Newer processors + //! implement heuristics for branch prediction that ignores any static hints. 
+ kOptionPredictedJumps = 1 + }; + + // -------------------------------------------------------------------------- + // [Buffer] + // -------------------------------------------------------------------------- + + //! Code or data buffer. + struct Buffer { + //! Code data. + uint8_t* data; + //! Total length of `data` in bytes. + size_t capacity; + //! Number of bytes of `data` used. + size_t length; + //! Current offset (assembler's cursor) in bytes. + size_t offset; + }; + + // -------------------------------------------------------------------------- + // [Section] + // -------------------------------------------------------------------------- + + //! Code or data section. + struct Section { + //! Section id. + uint32_t id; + //! Section flags. + uint32_t flags; + //! Section name (limited to 35 characters, PE allows max 8 chars). + char name[36]; + //! Section alignment requirements (0 if no requirements). + uint32_t alignment; + //! Section content. + Buffer content; + }; + // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- //! Create a new `Assembler` instance. - ASMJIT_API Assembler(Runtime* runtime); + ASMJIT_API Assembler(Runtime* runtime) noexcept; //! Destroy the `Assembler` instance. - ASMJIT_API virtual ~Assembler(); + ASMJIT_API virtual ~Assembler() noexcept; // -------------------------------------------------------------------------- // [Reset] @@ -475,7 +499,7 @@ struct ASMJIT_VIRTAPI Assembler { //! Reset the assembler. //! //! If `releaseMemory` is true all buffers will be released to the system. - ASMJIT_API void reset(bool releaseMemory = false); + ASMJIT_API void reset(bool releaseMemory = false) noexcept; // -------------------------------------------------------------------------- // [Runtime] @@ -484,16 +508,16 @@ struct ASMJIT_VIRTAPI Assembler { //! Get the runtime associated with the assembler. //! //! 
NOTE: Runtime is persistent across `reset()` calls. - ASMJIT_INLINE Runtime* getRuntime() const { return _runtime; } + ASMJIT_INLINE Runtime* getRuntime() const noexcept { return _runtime; } // -------------------------------------------------------------------------- // [Architecture] // -------------------------------------------------------------------------- //! Get the target architecture. - ASMJIT_INLINE uint32_t getArch() const { return _arch; } + ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; } //! Get the default register size - 4 or 8 bytes, depends on the target. - ASMJIT_INLINE uint32_t getRegSize() const { return _regSize; } + ASMJIT_INLINE uint32_t getRegSize() const noexcept { return _regSize; } // -------------------------------------------------------------------------- // [Logging] @@ -501,11 +525,11 @@ struct ASMJIT_VIRTAPI Assembler { #if !defined(ASMJIT_DISABLE_LOGGER) //! Get whether the assembler has a logger. - ASMJIT_INLINE bool hasLogger() const { return _logger != nullptr; } + ASMJIT_INLINE bool hasLogger() const noexcept { return _logger != nullptr; } //! Get the logger. - ASMJIT_INLINE Logger* getLogger() const { return _logger; } + ASMJIT_INLINE Logger* getLogger() const noexcept { return _logger; } //! Set the logger to `logger`. - ASMJIT_INLINE void setLogger(Logger* logger) { _logger = logger; } + ASMJIT_INLINE void setLogger(Logger* logger) noexcept { _logger = logger; } #endif // !ASMJIT_DISABLE_LOGGER // -------------------------------------------------------------------------- @@ -513,71 +537,70 @@ struct ASMJIT_VIRTAPI Assembler { // -------------------------------------------------------------------------- //! Get the error handler. - ASMJIT_INLINE ErrorHandler* getErrorHandler() const { return _errorHandler; } + ASMJIT_INLINE ErrorHandler* getErrorHandler() const noexcept { return _errorHandler; } //! Set the error handler. 
- ASMJIT_API Error setErrorHandler(ErrorHandler* handler); + ASMJIT_API Error setErrorHandler(ErrorHandler* handler) noexcept; //! Clear the error handler. - ASMJIT_INLINE Error resetErrorHandler() { return setErrorHandler(nullptr); } + ASMJIT_INLINE Error resetErrorHandler() noexcept { return setErrorHandler(nullptr); } //! Get the last error code. - ASMJIT_INLINE Error getLastError() const { return _lastError; } + ASMJIT_INLINE Error getLastError() const noexcept { return _lastError; } //! Set the last error code and propagate it through the error handler. - ASMJIT_API Error setLastError(Error error, const char* message = nullptr); + ASMJIT_API Error setLastError(Error error, const char* message = nullptr) noexcept; //! Clear the last error code. - ASMJIT_INLINE void resetLastError() { _lastError = kErrorOk; } + ASMJIT_INLINE void resetLastError() noexcept { _lastError = kErrorOk; } // -------------------------------------------------------------------------- - // [External CodeGen] + // [Serializers] // -------------------------------------------------------------------------- //! \internal //! //! Called after the code generator `cg` has been attached to the assembler. - ASMJIT_INLINE void _attached(CodeGen* cg) { - cg->_runtime = getRuntime(); - cg->_assembler = this; - cg->_hlId = _nextExternalId(); - _hlAttachedCount++; + ASMJIT_INLINE void _attached(ExternalTool* exTool) noexcept { + exTool->_runtime = getRuntime(); + exTool->_assembler = this; + exTool->_exId = _nextExId(); + _exCountAttached++; } //! \internal //! //! Called after the code generator `cg` has been detached from the assembler. - ASMJIT_INLINE void _detached(CodeGen* cg) { - cg->_runtime = nullptr; - cg->_assembler = nullptr; - cg->_hlId = 0; - _hlAttachedCount--; + ASMJIT_INLINE void _detached(ExternalTool* exTool) noexcept { + exTool->_runtime = nullptr; + exTool->_assembler = nullptr; + exTool->_exId = 0; + _exCountAttached--; } //! \internal //! //! 
Return a new code-gen ID (always greater than zero). - ASMJIT_INLINE uint64_t _nextExternalId() { - ASMJIT_ASSERT(_hlIdGenerator != ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF)); - return ++_hlIdGenerator; + ASMJIT_INLINE uint64_t _nextExId() noexcept { + ASMJIT_ASSERT(_exIdGenerator != ASMJIT_UINT64_C(0xFFFFFFFFFFFFFFFF)); + return ++_exIdGenerator; } // -------------------------------------------------------------------------- - // [Assembler Features] + // [Assembler Options] // -------------------------------------------------------------------------- - //! Get code-generator features. - ASMJIT_INLINE uint32_t getFeatures() const { return _features; } - //! Set code-generator features. - ASMJIT_INLINE void setFeatures(uint32_t features) { _features = features; } - - //! Get code-generator `feature`. - ASMJIT_INLINE bool hasFeature(uint32_t feature) const { - ASMJIT_ASSERT(feature < 32); - return (_features & (1 << feature)) != 0; + //! Get global assembler options. + ASMJIT_INLINE uint32_t getAsmOptions() const noexcept { + return _asmOptions; } - - //! Set code-generator `feature` to `value`. - ASMJIT_INLINE void setFeature(uint32_t feature, bool value) { - ASMJIT_ASSERT(feature < 32); - feature = static_cast(value) << feature; - _features = (_features & ~feature) | feature; + //! Get whether the global assembler `option` is turned on. + ASMJIT_INLINE bool hasAsmOption(uint32_t option) const noexcept { + return (_asmOptions & option) != 0; + } + //! Turn on global assembler `options`. + ASMJIT_INLINE void addAsmOptions(uint32_t options) noexcept { + _asmOptions |= options; + } + //! Turn off global assembler `options`. + ASMJIT_INLINE void clearAsmOptions(uint32_t options) noexcept { + _asmOptions &= ~options; } // -------------------------------------------------------------------------- @@ -585,12 +608,15 @@ struct ASMJIT_VIRTAPI Assembler { // -------------------------------------------------------------------------- //! Get options of the next instruction. 
- ASMJIT_INLINE uint32_t getInstOptions() const { return _instOptions; } + ASMJIT_INLINE uint32_t getInstOptions() const noexcept { + return _instOptions; + } //! Set options of the next instruction. - ASMJIT_INLINE void setInstOptions(uint32_t instOptions) { _instOptions = instOptions; } - + ASMJIT_INLINE void setInstOptions(uint32_t instOptions) noexcept { + _instOptions = instOptions; + } //! Get options of the next instruction and reset them. - ASMJIT_INLINE uint32_t getInstOptionsAndReset() { + ASMJIT_INLINE uint32_t getInstOptionsAndReset() noexcept { uint32_t instOptions = _instOptions; _instOptions = 0; return instOptions; @@ -605,17 +631,23 @@ struct ASMJIT_VIRTAPI Assembler { //! The internal code-buffer will grow at least by `n` bytes so `n` bytes can //! be added to it. If `n` is zero or `getOffset() + n` is not greater than //! the current capacity of the code-buffer this function does nothing. - ASMJIT_API Error _grow(size_t n); + ASMJIT_API Error _grow(size_t n) noexcept; //! Reserve the code-buffer to at least `n` bytes. - ASMJIT_API Error _reserve(size_t n); + ASMJIT_API Error _reserve(size_t n) noexcept; //! Get capacity of the code-buffer. - ASMJIT_INLINE size_t getCapacity() const { return (size_t)(_end - _buffer); } + ASMJIT_INLINE size_t getCapacity() const noexcept { + return (size_t)(_end - _buffer); + } //! Get the number of remaining bytes in code-buffer. - ASMJIT_INLINE size_t getRemainingSpace() const { return (size_t)(_end - _cursor); } + ASMJIT_INLINE size_t getRemainingSpace() const noexcept { + return (size_t)(_end - _cursor); + } //! Get current offset in buffer, same as `getOffset() + getTramplineSize()`. - ASMJIT_INLINE size_t getCodeSize() const { return getOffset() + getTrampolinesSize(); } + ASMJIT_INLINE size_t getCodeSize() const noexcept { + return getOffset() + getTrampolinesSize(); + } //! Get size of all possible trampolines. //! @@ -623,25 +655,25 @@ struct ASMJIT_VIRTAPI Assembler { //! addresses. 
This value is only non-zero if jmp of call instructions were //! used with immediate operand (this means jumping or calling an absolute //! address directly). - ASMJIT_INLINE size_t getTrampolinesSize() const { return _trampolinesSize; } + ASMJIT_INLINE size_t getTrampolinesSize() const noexcept { return _trampolinesSize; } //! Get code-buffer. - ASMJIT_INLINE uint8_t* getBuffer() const { return _buffer; } + ASMJIT_INLINE uint8_t* getBuffer() const noexcept { return _buffer; } //! Get the end of the code-buffer (points to the first byte that is invalid). - ASMJIT_INLINE uint8_t* getEnd() const { return _end; } + ASMJIT_INLINE uint8_t* getEnd() const noexcept { return _end; } //! Get the current position in the code-buffer. - ASMJIT_INLINE uint8_t* getCursor() const { return _cursor; } + ASMJIT_INLINE uint8_t* getCursor() const noexcept { return _cursor; } //! Set the current position in the buffer. - ASMJIT_INLINE void setCursor(uint8_t* cursor) { + ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { ASMJIT_ASSERT(cursor >= _buffer && cursor <= _end); _cursor = cursor; } //! Get the current offset in the buffer. - ASMJIT_INLINE size_t getOffset() const { return (size_t)(_cursor - _buffer); } + ASMJIT_INLINE size_t getOffset() const noexcept { return (size_t)(_cursor - _buffer); } //! Set the current offset in the buffer to `offset` and return the previous value. - ASMJIT_INLINE size_t setOffset(size_t offset) { + ASMJIT_INLINE size_t setOffset(size_t offset) noexcept { ASMJIT_ASSERT(offset < getCapacity()); size_t oldOffset = (size_t)(_cursor - _buffer); @@ -649,76 +681,100 @@ struct ASMJIT_VIRTAPI Assembler { return oldOffset; } - //! Get BYTE at position `pos`. - ASMJIT_INLINE uint8_t getByteAt(size_t pos) const { + //! Read `int8_t` at index `pos`. + ASMJIT_INLINE int32_t readI8At(size_t pos) const noexcept { ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer)); - return *reinterpret_cast(_buffer + pos); + return Utils::readI8(_buffer + pos); } - //! 
Get WORD at position `pos`. - ASMJIT_INLINE uint16_t getWordAt(size_t pos) const { - ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer)); - return *reinterpret_cast(_buffer + pos); - } - - //! Get DWORD at position `pos`. - ASMJIT_INLINE uint32_t getDWordAt(size_t pos) const { - ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); - return *reinterpret_cast(_buffer + pos); - } - - //! Get QWORD at position `pos`. - ASMJIT_INLINE uint64_t getQWordAt(size_t pos) const { - ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer)); - return *reinterpret_cast(_buffer + pos); - } - - //! Get int32_t at position `pos`. - ASMJIT_INLINE int32_t getInt32At(size_t pos) const { - ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); - return *reinterpret_cast(_buffer + pos); - } - - //! Get uint32_t at position `pos`. - ASMJIT_INLINE uint32_t getUInt32At(size_t pos) const { - ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); - return *reinterpret_cast(_buffer + pos); - } - - //! Set BYTE at position `pos`. - ASMJIT_INLINE void setByteAt(size_t pos, uint8_t x) { + //! Read `uint8_t` at index `pos`. + ASMJIT_INLINE uint32_t readU8At(size_t pos) const noexcept { ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer)); - *reinterpret_cast(_buffer + pos) = x; + return Utils::readU8(_buffer + pos); } - //! Set WORD at position `pos`. - ASMJIT_INLINE void setWordAt(size_t pos, uint16_t x) { + //! Read `int16_t` at index `pos`. + ASMJIT_INLINE int32_t readI16At(size_t pos) const noexcept { ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer)); - *reinterpret_cast(_buffer + pos) = x; + return Utils::readI16u(_buffer + pos); } - //! Set DWORD at position `pos`. - ASMJIT_INLINE void setDWordAt(size_t pos, uint32_t x) { + //! Read `uint16_t` at index `pos`. + ASMJIT_INLINE uint32_t readU16At(size_t pos) const noexcept { + ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer)); + return Utils::readU16u(_buffer + pos); + } + + //! Read `int32_t` at index `pos`. 
+ ASMJIT_INLINE int32_t readI32At(size_t pos) const noexcept { ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); - *reinterpret_cast(_buffer + pos) = x; + return Utils::readI32u(_buffer + pos); } - //! Set QWORD at position `pos`. - ASMJIT_INLINE void setQWordAt(size_t pos, uint64_t x) { + //! Read `uint32_t` at index `pos`. + ASMJIT_INLINE uint32_t readU32At(size_t pos) const noexcept { + ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); + return Utils::readU32u(_buffer + pos); + } + + //! Read `int64_t` at index `pos`. + ASMJIT_INLINE int64_t readI64At(size_t pos) const noexcept { ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer)); - *reinterpret_cast(_buffer + pos) = x; + return Utils::readI64u(_buffer + pos); } - //! Set int32_t at position `pos`. - ASMJIT_INLINE void setInt32At(size_t pos, int32_t x) { - ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); - *reinterpret_cast(_buffer + pos) = x; + //! Read `uint64_t` at index `pos`. + ASMJIT_INLINE uint64_t readU64At(size_t pos) const noexcept { + ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer)); + return Utils::readU64u(_buffer + pos); } - //! Set uint32_t at position `pos`. - ASMJIT_INLINE void setUInt32At(size_t pos, uint32_t x) { + //! Write `int8_t` at index `pos`. + ASMJIT_INLINE void writeI8At(size_t pos, int32_t x) noexcept { + ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer)); + Utils::writeI8(_buffer + pos, x); + } + + //! Write `uint8_t` at index `pos`. + ASMJIT_INLINE void writeU8At(size_t pos, uint32_t x) noexcept { + ASMJIT_ASSERT(pos + 1 <= (size_t)(_end - _buffer)); + Utils::writeU8(_buffer + pos, x); + } + + //! Write `int16_t` at index `pos`. + ASMJIT_INLINE void writeI16At(size_t pos, int32_t x) noexcept { + ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer)); + Utils::writeI16u(_buffer + pos, x); + } + + //! Write `uint16_t` at index `pos`. 
+ ASMJIT_INLINE void writeU16At(size_t pos, uint32_t x) noexcept { + ASMJIT_ASSERT(pos + 2 <= (size_t)(_end - _buffer)); + Utils::writeU16u(_buffer + pos, x); + } + + //! Write `int32_t` at index `pos`. + ASMJIT_INLINE void writeI32At(size_t pos, int32_t x) noexcept { ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); - *reinterpret_cast(_buffer + pos) = x; + Utils::writeI32u(_buffer + pos, x); + } + + //! Write `uint32_t` at index `pos`. + ASMJIT_INLINE void writeU32At(size_t pos, uint32_t x) noexcept { + ASMJIT_ASSERT(pos + 4 <= (size_t)(_end - _buffer)); + Utils::writeU32u(_buffer + pos, x); + } + + //! Write `int64_t` at index `pos`. + ASMJIT_INLINE void writeI64At(size_t pos, int64_t x) noexcept { + ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer)); + Utils::writeI64u(_buffer + pos, x); + } + + //! Write `uint64_t` at index `pos`. + ASMJIT_INLINE void writeU64At(size_t pos, uint64_t x) noexcept { + ASMJIT_ASSERT(pos + 8 <= (size_t)(_end - _buffer)); + Utils::writeU64u(_buffer + pos, x); } // -------------------------------------------------------------------------- @@ -726,7 +782,7 @@ struct ASMJIT_VIRTAPI Assembler { // -------------------------------------------------------------------------- //! Embed raw data into the code-buffer. - ASMJIT_API virtual Error embed(const void* data, uint32_t size); + ASMJIT_API virtual Error embed(const void* data, uint32_t size) noexcept; // -------------------------------------------------------------------------- // [Align] @@ -736,24 +792,24 @@ struct ASMJIT_VIRTAPI Assembler { //! //! The sequence that is used to fill the gap between the aligned location //! and the current depends on `alignMode`, see \ref AlignMode. 
- virtual Error align(uint32_t alignMode, uint32_t offset) = 0; + virtual Error align(uint32_t alignMode, uint32_t offset) noexcept = 0; // -------------------------------------------------------------------------- // [Label] // -------------------------------------------------------------------------- //! Get number of labels created. - ASMJIT_INLINE size_t getLabelsCount() const { - return _labelList.getLength(); + ASMJIT_INLINE size_t getLabelsCount() const noexcept { + return _labels.getLength(); } //! Get whether the `label` is valid (i.e. registered). - ASMJIT_INLINE bool isLabelValid(const Label& label) const { + ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.getId()); } //! Get whether the label `id` is valid (i.e. registered). - ASMJIT_INLINE bool isLabelValid(uint32_t id) const { - return static_cast(id) < _labelList.getLength(); + ASMJIT_INLINE bool isLabelValid(uint32_t id) const noexcept { + return static_cast(id) < _labels.getLength(); } //! Get whether the `label` is bound. @@ -762,53 +818,52 @@ struct ASMJIT_VIRTAPI Assembler { //! of the label by using `isLabelValid()` method before the bound check if //! you are not sure about its validity, otherwise you may hit an assertion //! failure in debug mode, and undefined behavior in release mode. - ASMJIT_INLINE bool isLabelBound(const Label& label) const { + ASMJIT_INLINE bool isLabelBound(const Label& label) const noexcept { return isLabelBound(label.getId()); } //! \overload - ASMJIT_INLINE bool isLabelBound(uint32_t id) const { + ASMJIT_INLINE bool isLabelBound(uint32_t id) const noexcept { ASMJIT_ASSERT(isLabelValid(id)); - - return _labelList[id]->offset != -1; + return _labels[id]->offset != -1; } //! Get a `label` offset or -1 if the label is not yet bound. - ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const { + ASMJIT_INLINE intptr_t getLabelOffset(const Label& label) const noexcept { return getLabelOffset(label.getId()); } //! 
\overload - ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const { + ASMJIT_INLINE intptr_t getLabelOffset(uint32_t id) const noexcept { ASMJIT_ASSERT(isLabelValid(id)); - return _labelList[id]->offset; + return _labels[id]->offset; } //! Get `LabelData` by `label`. - ASMJIT_INLINE LabelData* getLabelData(const Label& label) const { + ASMJIT_INLINE LabelData* getLabelData(const Label& label) const noexcept { return getLabelData(label.getId()); } //! \overload - ASMJIT_INLINE LabelData* getLabelData(uint32_t id) const { + ASMJIT_INLINE LabelData* getLabelData(uint32_t id) const noexcept { ASMJIT_ASSERT(isLabelValid(id)); - return const_cast(_labelList[id]); + return const_cast(_labels[id]); } //! \internal //! //! Create a new label and return its ID. - ASMJIT_API uint32_t _newLabelId(); + ASMJIT_API uint32_t _newLabelId() noexcept; //! \internal //! //! New LabelLink instance. - ASMJIT_API LabelLink* _newLabelLink(); + ASMJIT_API LabelLink* _newLabelLink() noexcept; //! Create and return a new `Label`. - ASMJIT_INLINE Label newLabel() { return Label(_newLabelId()); } + ASMJIT_INLINE Label newLabel() noexcept { return Label(_newLabelId()); } //! Bind the `label` to the current offset. //! //! \note Label can be bound only once! - ASMJIT_API virtual Error bind(const Label& label); + ASMJIT_API virtual Error bind(const Label& label) noexcept; // -------------------------------------------------------------------------- // [Reloc] @@ -831,18 +886,18 @@ struct ASMJIT_VIRTAPI Assembler { //! //! A given buffer will be overwritten, to get the number of bytes required, //! use `getCodeSize()`. - ASMJIT_API size_t relocCode(void* dst, Ptr baseAddress = kNoBaseAddress) const; + ASMJIT_API size_t relocCode(void* dst, Ptr baseAddress = kNoBaseAddress) const noexcept; //! \internal //! //! Reloc code. 
- virtual size_t _relocCode(void* dst, Ptr baseAddress) const = 0; + virtual size_t _relocCode(void* dst, Ptr baseAddress) const noexcept = 0; // -------------------------------------------------------------------------- // [Make] // -------------------------------------------------------------------------- - ASMJIT_API virtual void* make(); + ASMJIT_API virtual void* make() noexcept; // -------------------------------------------------------------------------- // [Emit] @@ -886,14 +941,8 @@ struct ASMJIT_VIRTAPI Assembler { //! Associated runtime. Runtime* _runtime; - -#if !defined(ASMJIT_DISABLE_LOGGER) //! Associated logger. Logger* _logger; -#else - //! Makes libraries built with or without logging support binary compatible. - void* _logger; -#endif // ASMJIT_DISABLE_LOGGER //! Associated error handler, triggered by \ref setLastError(). ErrorHandler* _errorHandler; @@ -904,26 +953,26 @@ struct ASMJIT_VIRTAPI Assembler { //! \internal uint16_t _reserved; - //! Assembler features, used by \ref hasFeature() and \ref setFeature(). - uint32_t _features; - //! Options affecting the next instruction. + //! Assembler options, used by \ref getAsmOptions() and \ref hasAsmOption(). + uint32_t _asmOptions; + //! Instruction options, affect the next instruction that will be emitted. uint32_t _instOptions; //! Last error code. uint32_t _lastError; - //! CodeGen ID generator. - uint64_t _hlIdGenerator; - //! Count of high-level code generators attached. - size_t _hlAttachedCount; + //! External tool ID generator. + uint64_t _exIdGenerator; + //! Count of external tools currently attached. + size_t _exCountAttached; //! General purpose zone allocator. Zone _zoneAllocator; - //! Start of the code-buffer. + //! Start of the code-buffer of the current section. uint8_t* _buffer; - //! End of the code-buffer (points to the first invalid byte). + //! End of the code-buffer of the current section (points to the first invalid byte). uint8_t* _end; - //! 
The current position in code `_buffer`. + //! The current position in `_buffer` of the current section. uint8_t* _cursor; //! Size of all possible trampolines. @@ -934,23 +983,16 @@ struct ASMJIT_VIRTAPI Assembler { //! Unused `LabelLink` structures pool. LabelLink* _unusedLinks; - //! LabelData list. - PodVector _labelList; - //! RelocData list. - PodVector _relocList; + //! Assembler sections. + PodVectorTmp _sections; + //! Assembler labels. + PodVectorTmp _labels; + //! Table of relocations. + PodVector _relocations; }; //! \} -// ============================================================================ -// [Defined-Later] -// ============================================================================ - -ASMJIT_INLINE Label::Label(Assembler& a) : Operand(NoInit) { - reset(); - _label.id = a._newLabelId(); -} - } // asmjit namespace // [Api-End] diff --git a/src/asmjit/base/compiler.cpp b/src/asmjit/base/compiler.cpp index 7101d98..0d6cde6 100644 --- a/src/asmjit/base/compiler.cpp +++ b/src/asmjit/base/compiler.cpp @@ -38,7 +38,7 @@ enum { kCompilerDefaultLookAhead = 64 }; // [asmjit::Compiler - Construction / Destruction] // ============================================================================ -Compiler::Compiler() : +Compiler::Compiler() noexcept : _features(0), _maxLookAhead(kCompilerDefaultLookAhead), _instOptions(0), @@ -56,13 +56,13 @@ Compiler::Compiler() : _constAllocator(4096 - Zone::kZoneOverhead), _localConstPool(&_constAllocator), _globalConstPool(&_zoneAllocator) {} -Compiler::~Compiler() {} +Compiler::~Compiler() noexcept {} // ============================================================================ // [asmjit::Compiler - Attach / Reset] // ============================================================================ -void Compiler::reset(bool releaseMemory) { +void Compiler::reset(bool releaseMemory) noexcept { Assembler* assembler = getAssembler(); if (assembler != nullptr) assembler->_detached(this); @@ -105,7 +105,7 @@ void 
Compiler::reset(bool releaseMemory) { // [asmjit::Compiler - Node-Factory] // ============================================================================ -HLData* Compiler::newDataNode(const void* data, uint32_t size) { +HLData* Compiler::newDataNode(const void* data, uint32_t size) noexcept { if (size > HLData::kInlineBufferSize) { void* clonedData = _stringAllocator.alloc(size); if (clonedData == nullptr) @@ -119,11 +119,11 @@ HLData* Compiler::newDataNode(const void* data, uint32_t size) { return newNode(const_cast(data), size); } -HLAlign* Compiler::newAlignNode(uint32_t alignMode, uint32_t offset) { +HLAlign* Compiler::newAlignNode(uint32_t alignMode, uint32_t offset) noexcept { return newNode(alignMode, offset); } -HLLabel* Compiler::newLabelNode() { +HLLabel* Compiler::newLabelNode() noexcept { Assembler* assembler = getAssembler(); if (assembler == nullptr) return nullptr; @@ -134,16 +134,16 @@ HLLabel* Compiler::newLabelNode() { if (node == nullptr) return nullptr; // These have to be zero now. 
- ASMJIT_ASSERT(ld->hlId == 0); - ASMJIT_ASSERT(ld->hlData == nullptr); + ASMJIT_ASSERT(ld->exId == 0); + ASMJIT_ASSERT(ld->exData == nullptr); - ld->hlId = _hlId; - ld->hlData = node; + ld->exId = _exId; + ld->exData = node; return node; } -HLComment* Compiler::newCommentNode(const char* str) { +HLComment* Compiler::newCommentNode(const char* str) noexcept { if (str != nullptr && str[0]) { str = _stringAllocator.sdup(str); if (str == nullptr) @@ -153,7 +153,7 @@ HLComment* Compiler::newCommentNode(const char* str) { return newNode(str); } -HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) { +HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept { if (var.getId() == kInvalidValue) return nullptr; @@ -165,7 +165,7 @@ HLHint* Compiler::newHintNode(Var& var, uint32_t hint, uint32_t value) { // [asmjit::Compiler - Code-Stream] // ============================================================================ -HLNode* Compiler::addNode(HLNode* node) { +HLNode* Compiler::addNode(HLNode* node) noexcept { ASMJIT_ASSERT(node != nullptr); ASMJIT_ASSERT(node->_prev == nullptr); ASMJIT_ASSERT(node->_next == nullptr); @@ -199,7 +199,7 @@ HLNode* Compiler::addNode(HLNode* node) { return node; } -HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) { +HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) noexcept { ASMJIT_ASSERT(node != nullptr); ASMJIT_ASSERT(node->_prev == nullptr); ASMJIT_ASSERT(node->_next == nullptr); @@ -220,7 +220,7 @@ HLNode* Compiler::addNodeBefore(HLNode* node, HLNode* ref) { return node; } -HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) { +HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) noexcept { ASMJIT_ASSERT(node != nullptr); ASMJIT_ASSERT(node->_prev == nullptr); ASMJIT_ASSERT(node->_next == nullptr); @@ -241,7 +241,7 @@ HLNode* Compiler::addNodeAfter(HLNode* node, HLNode* ref) { return node; } -static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) { 
+static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) noexcept { if (node_->isJmpOrJcc()) { HLJump* node = static_cast(node_); HLLabel* label = node->getTarget(); @@ -269,7 +269,7 @@ static ASMJIT_INLINE void Compiler_nodeRemoved(Compiler* self, HLNode* node_) { } } -HLNode* Compiler::removeNode(HLNode* node) { +HLNode* Compiler::removeNode(HLNode* node) noexcept { HLNode* prev = node->_prev; HLNode* next = node->_next; @@ -293,7 +293,7 @@ HLNode* Compiler::removeNode(HLNode* node) { return node; } -void Compiler::removeNodes(HLNode* first, HLNode* last) { +void Compiler::removeNodes(HLNode* first, HLNode* last) noexcept { if (first == last) { removeNode(first); return; @@ -330,7 +330,7 @@ void Compiler::removeNodes(HLNode* first, HLNode* last) { } } -HLNode* Compiler::setCursor(HLNode* node) { +HLNode* Compiler::setCursor(HLNode* node) noexcept { HLNode* old = _cursor; _cursor = node; return old; @@ -340,7 +340,7 @@ HLNode* Compiler::setCursor(HLNode* node) { // [asmjit::Compiler - Align] // ============================================================================ -Error Compiler::align(uint32_t alignMode, uint32_t offset) { +Error Compiler::align(uint32_t alignMode, uint32_t offset) noexcept { HLAlign* node = newAlignNode(alignMode, offset); if (node == nullptr) return setLastError(kErrorNoHeapMemory); @@ -353,25 +353,25 @@ Error Compiler::align(uint32_t alignMode, uint32_t offset) { // [asmjit::Compiler - Label] // ============================================================================ -HLLabel* Compiler::getHLLabel(uint32_t id) const { +HLLabel* Compiler::getHLLabel(uint32_t id) const noexcept { Assembler* assembler = getAssembler(); if (assembler == nullptr) return nullptr; LabelData* ld = assembler->getLabelData(id); - if (ld->hlId == _hlId) - return static_cast(ld->hlData); + if (ld->exId == _exId) + return static_cast(ld->exData); else return nullptr; } -bool Compiler::isLabelValid(uint32_t id) const { +bool 
Compiler::isLabelValid(uint32_t id) const noexcept { Assembler* assembler = getAssembler(); if (assembler == nullptr) return false; return static_cast(id) < assembler->getLabelsCount(); } -uint32_t Compiler::_newLabelId() { +uint32_t Compiler::_newLabelId() noexcept { HLLabel* node = newLabelNode(); if (node == nullptr) { setLastError(kErrorNoHeapMemory); @@ -381,7 +381,7 @@ uint32_t Compiler::_newLabelId() { return node->getLabelId(); } -Error Compiler::bind(const Label& label) { +Error Compiler::bind(const Label& label) noexcept { HLLabel* node = getHLLabel(label); if (node == nullptr) return setLastError(kErrorInvalidState); @@ -393,7 +393,7 @@ Error Compiler::bind(const Label& label) { // [asmjit::Compiler - Embed] // ============================================================================ -Error Compiler::embed(const void* data, uint32_t size) { +Error Compiler::embed(const void* data, uint32_t size) noexcept { HLData* node = newDataNode(data, size); if (node == nullptr) return setLastError(kErrorNoHeapMemory); @@ -402,7 +402,7 @@ Error Compiler::embed(const void* data, uint32_t size) { return kErrorOk; } -Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) { +Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) noexcept { if (label.getId() == kInvalidValue) return kErrorInvalidState; @@ -423,7 +423,7 @@ Error Compiler::embedConstPool(const Label& label, const ConstPool& pool) { // [asmjit::Compiler - Comment] // ============================================================================ -Error Compiler::comment(const char* fmt, ...) { +Error Compiler::comment(const char* fmt, ...) noexcept { char buf[256]; char* p = buf; @@ -448,7 +448,7 @@ Error Compiler::comment(const char* fmt, ...) 
{ // [asmjit::Compiler - Hint] // ============================================================================ -Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) { +Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) noexcept { if (var.getId() == kInvalidValue) return kErrorOk; @@ -464,9 +464,9 @@ Error Compiler::_hint(Var& var, uint32_t hint, uint32_t value) { // [asmjit::Compiler - Vars] // ============================================================================ -VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* name) { +VarData* Compiler::_newVd(const VarInfo& vi, const char* name) noexcept { VarData* vd = reinterpret_cast(_varAllocator.alloc(sizeof(VarData))); - if (vd == nullptr) + if (ASMJIT_UNLIKELY(vd == nullptr)) goto _NoMemory; vd->_name = noName; @@ -479,8 +479,8 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* } #endif // !ASMJIT_DISABLE_LOGGER - vd->_type = static_cast(type); - vd->_class = static_cast(c); + vd->_type = static_cast(vi.getTypeId()); + vd->_class = static_cast(vi.getRegClass()); vd->_flags = 0; vd->_priority = 10; @@ -492,9 +492,9 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* vd->_saveOnUnuse = false; vd->_modified = false; vd->_reserved0 = 0; - vd->_alignment = static_cast(Utils::iMin(size, 64)); + vd->_alignment = static_cast(Utils::iMin(vi.getSize(), 64)); - vd->_size = size; + vd->_size = vi.getSize(); vd->_homeMask = 0; vd->_memOffset = 0; @@ -507,7 +507,7 @@ VarData* Compiler::_newVd(uint32_t type, uint32_t size, uint32_t c, const char* vd->_va = nullptr; - if (_varList.append(vd) != kErrorOk) + if (ASMJIT_UNLIKELY(_varList.append(vd) != kErrorOk)) goto _NoMemory; return vd; @@ -516,43 +516,43 @@ _NoMemory: return nullptr; } -Error Compiler::alloc(Var& var) { +Error Compiler::alloc(Var& var) noexcept { if (var.getId() == kInvalidValue) return kErrorOk; return _hint(var, kVarHintAlloc, kInvalidValue); } 
-Error Compiler::alloc(Var& var, uint32_t regIndex) { +Error Compiler::alloc(Var& var, uint32_t regIndex) noexcept { if (var.getId() == kInvalidValue) return kErrorOk; return _hint(var, kVarHintAlloc, regIndex); } -Error Compiler::alloc(Var& var, const Reg& reg) { +Error Compiler::alloc(Var& var, const Reg& reg) noexcept { if (var.getId() == kInvalidValue) return kErrorOk; return _hint(var, kVarHintAlloc, reg.getRegIndex()); } -Error Compiler::save(Var& var) { +Error Compiler::save(Var& var) noexcept { if (var.getId() == kInvalidValue) return kErrorOk; return _hint(var, kVarHintSave, kInvalidValue); } -Error Compiler::spill(Var& var) { +Error Compiler::spill(Var& var) noexcept { if (var.getId() == kInvalidValue) return kErrorOk; return _hint(var, kVarHintSpill, kInvalidValue); } -Error Compiler::unuse(Var& var) { +Error Compiler::unuse(Var& var) noexcept { if (var.getId() == kInvalidValue) return kErrorOk; return _hint(var, kVarHintUnuse, kInvalidValue); } -uint32_t Compiler::getPriority(Var& var) const { +uint32_t Compiler::getPriority(Var& var) const noexcept { if (var.getId() == kInvalidValue) return kInvalidValue; @@ -560,7 +560,7 @@ uint32_t Compiler::getPriority(Var& var) const { return vd->getPriority(); } -void Compiler::setPriority(Var& var, uint32_t priority) { +void Compiler::setPriority(Var& var, uint32_t priority) noexcept { if (var.getId() == kInvalidValue) return; @@ -571,7 +571,7 @@ void Compiler::setPriority(Var& var, uint32_t priority) { vd->_priority = static_cast(priority); } -bool Compiler::getSaveOnUnuse(Var& var) const { +bool Compiler::getSaveOnUnuse(Var& var) const noexcept { if (var.getId() == kInvalidValue) return false; @@ -579,7 +579,7 @@ bool Compiler::getSaveOnUnuse(Var& var) const { return static_cast(vd->_saveOnUnuse); } -void Compiler::setSaveOnUnuse(Var& var, bool value) { +void Compiler::setSaveOnUnuse(Var& var, bool value) noexcept { if (var.getId() == kInvalidValue) return; @@ -587,7 +587,7 @@ void Compiler::setSaveOnUnuse(Var& 
var, bool value) { vd->_saveOnUnuse = value; } -void Compiler::rename(Var& var, const char* fmt, ...) { +void Compiler::rename(Var& var, const char* fmt, ...) noexcept { if (var.getId() == kInvalidValue) return; diff --git a/src/asmjit/base/compiler.h b/src/asmjit/base/compiler.h index 096845f..cda16e8 100644 --- a/src/asmjit/base/compiler.h +++ b/src/asmjit/base/compiler.h @@ -18,6 +18,7 @@ #include "../base/containers.h" #include "../base/hlstream.h" #include "../base/operand.h" +#include "../base/podvector.h" #include "../base/utils.h" #include "../base/zone.h" @@ -48,8 +49,8 @@ ASMJIT_ENUM(CompilerFeatures) { //! Default `false` - has to be explicitly enabled as the scheduler needs //! some time to run. //! - //! X86/X64 - //! ------- + //! X86/X64 Specific + //! ---------------- //! //! If scheduling is enabled AsmJit will try to reorder instructions to //! minimize the dependency chain. Scheduler always runs after the registers @@ -71,6 +72,65 @@ ASMJIT_ENUM(ConstScope) { kConstScopeGlobal = 1 }; +// ============================================================================ +// [asmjit::VarInfo] +// ============================================================================ + +struct VarInfo { + // ============================================================================ + // [Flags] + // ============================================================================ + + //! \internal + //! + //! Variable flags. + ASMJIT_ENUM(Flags) { + //! Variable contains one or more single-precision floating point. + kFlagSP = 0x10, + //! Variable contains one or more double-precision floating point. + kFlagDP = 0x20, + //! Variable is a vector, contains packed data. + kFlagSIMD = 0x80 + }; + + // -------------------------------------------------------------------------- + // [Accessors] + // -------------------------------------------------------------------------- + + //! Get type id. + ASMJIT_INLINE uint32_t getTypeId() const noexcept { return _typeId; } + //! 
Get type name. + ASMJIT_INLINE const char* getTypeName() const noexcept { return _typeName; } + + //! Get register size in bytes. + ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; } + //! Get variable class, see \ref RegClass. + ASMJIT_INLINE uint32_t getRegClass() const noexcept { return _regClass; } + //! Get register type, see `X86RegType`. + ASMJIT_INLINE uint32_t getRegType() const noexcept { return _regType; } + //! Get type flags, see \ref Flags. + ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; } + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + //! Variable type id. + uint8_t _typeId; + //! Variable and register size (in bytes). + uint8_t _size; + //! Register class, see `RegClass`. + uint8_t _regClass; + //! Register type the variable is mapped to. + uint8_t _regType; + + //! Variable info flags, see \ref Flags. + uint32_t _flags; + + //! Variable type name. + char _typeName[8]; +}; + // ============================================================================ // [asmjit::Compiler] // ============================================================================ @@ -78,7 +138,7 @@ ASMJIT_ENUM(ConstScope) { //! Compiler interface. //! //! \sa Assembler. -struct ASMJIT_VIRTAPI Compiler : public CodeGen { +struct ASMJIT_VIRTAPI Compiler : public ExternalTool { ASMJIT_NO_COPY(Compiler) // -------------------------------------------------------------------------- @@ -86,43 +146,51 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { // -------------------------------------------------------------------------- //! Create a new `Compiler` instance. - ASMJIT_API Compiler(); + ASMJIT_API Compiler() noexcept; //! Destroy the `Compiler` instance.
- ASMJIT_API virtual ~Compiler(); + ASMJIT_API virtual ~Compiler() noexcept; // -------------------------------------------------------------------------- // [Reset] // -------------------------------------------------------------------------- //! \override - ASMJIT_API virtual void reset(bool releaseMemory); + ASMJIT_API virtual void reset(bool releaseMemory) noexcept; // -------------------------------------------------------------------------- // [Compiler Features] // -------------------------------------------------------------------------- //! Get code-generator features. - ASMJIT_INLINE uint32_t getFeatures() const { return _features; } + ASMJIT_INLINE uint32_t getFeatures() const noexcept { + return _features; + } //! Set code-generator features. - ASMJIT_INLINE void setFeatures(uint32_t features) { _features = features; } + ASMJIT_INLINE void setFeatures(uint32_t features) noexcept { + _features = features; + } //! Get code-generator `feature`. - ASMJIT_INLINE bool hasFeature(uint32_t feature) const { + ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept { ASMJIT_ASSERT(feature < 32); return (_features & (1 << feature)) != 0; } //! Set code-generator `feature` to `value`. - ASMJIT_INLINE void setFeature(uint32_t feature, bool value) { + ASMJIT_INLINE void setFeature(uint32_t feature, bool value) noexcept { ASMJIT_ASSERT(feature < 32); feature = static_cast(value) << feature; _features = (_features & ~feature) | feature; } //! Get maximum look ahead. - ASMJIT_INLINE uint32_t getMaxLookAhead() const { return _maxLookAhead; } + ASMJIT_INLINE uint32_t getMaxLookAhead() const noexcept { + return _maxLookAhead; + } //! Set maximum look ahead to `val`. 
- ASMJIT_INLINE void setMaxLookAhead(uint32_t val) { _maxLookAhead = val; } + ASMJIT_INLINE void setMaxLookAhead(uint32_t val) noexcept { + _maxLookAhead = val; + } // -------------------------------------------------------------------------- // [Token ID] @@ -131,21 +199,29 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { //! \internal //! //! Reset the token-id generator. - ASMJIT_INLINE void _resetTokenGenerator() { _tokenGenerator = 0; } + ASMJIT_INLINE void _resetTokenGenerator() noexcept { + _tokenGenerator = 0; + } //! \internal //! //! Generate a new unique token id. - ASMJIT_INLINE uint32_t _generateUniqueToken() { return ++_tokenGenerator; } + ASMJIT_INLINE uint32_t _generateUniqueToken() noexcept { + return ++_tokenGenerator; + } // -------------------------------------------------------------------------- // [Instruction Options] // -------------------------------------------------------------------------- //! Get options of the next instruction. - ASMJIT_INLINE uint32_t getInstOptions() const { return _instOptions; } + ASMJIT_INLINE uint32_t getInstOptions() const noexcept { + return _instOptions; + } //! Set options of the next instruction. - ASMJIT_INLINE void setInstOptions(uint32_t instOptions) { _instOptions = instOptions; } + ASMJIT_INLINE void setInstOptions(uint32_t instOptions) noexcept { + _instOptions = instOptions; + } //! Get options of the next instruction and reset them. ASMJIT_INLINE uint32_t getInstOptionsAndReset() { @@ -160,28 +236,28 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { //! \internal template - ASMJIT_INLINE T* newNode() { + ASMJIT_INLINE T* newNode() noexcept { void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this); } //! \internal template - ASMJIT_INLINE T* newNode(P0 p0) { + ASMJIT_INLINE T* newNode(P0 p0) noexcept { void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this, p0); } //! 
\internal template - ASMJIT_INLINE T* newNode(P0 p0, P1 p1) { + ASMJIT_INLINE T* newNode(P0 p0, P1 p1) noexcept { void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this, p0, p1); } //! \internal template - ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) { + ASMJIT_INLINE T* newNode(P0 p0, P1 p1, P2 p2) noexcept { void* p = _zoneAllocator.alloc(sizeof(T)); return new(p) T(this, p0, p1, p2); } @@ -189,66 +265,66 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { //! \internal //! //! Create a new `HLData` node. - ASMJIT_API HLData* newDataNode(const void* data, uint32_t size); + ASMJIT_API HLData* newDataNode(const void* data, uint32_t size) noexcept; //! \internal //! //! Create a new `HLAlign` node. - ASMJIT_API HLAlign* newAlignNode(uint32_t alignMode, uint32_t offset); + ASMJIT_API HLAlign* newAlignNode(uint32_t alignMode, uint32_t offset) noexcept; //! \internal //! //! Create a new `HLLabel` node. - ASMJIT_API HLLabel* newLabelNode(); + ASMJIT_API HLLabel* newLabelNode() noexcept; //! \internal //! //! Create a new `HLComment`. - ASMJIT_API HLComment* newCommentNode(const char* str); + ASMJIT_API HLComment* newCommentNode(const char* str) noexcept; //! \internal //! //! Create a new `HLHint`. - ASMJIT_API HLHint* newHintNode(Var& var, uint32_t hint, uint32_t value); + ASMJIT_API HLHint* newHintNode(Var& var, uint32_t hint, uint32_t value) noexcept; // -------------------------------------------------------------------------- // [Code-Stream] // -------------------------------------------------------------------------- //! Add node `node` after current and set current to `node`. - ASMJIT_API HLNode* addNode(HLNode* node); + ASMJIT_API HLNode* addNode(HLNode* node) noexcept; //! Insert `node` before `ref`. - ASMJIT_API HLNode* addNodeBefore(HLNode* node, HLNode* ref); + ASMJIT_API HLNode* addNodeBefore(HLNode* node, HLNode* ref) noexcept; //! Insert `node` after `ref`. 
- ASMJIT_API HLNode* addNodeAfter(HLNode* node, HLNode* ref); + ASMJIT_API HLNode* addNodeAfter(HLNode* node, HLNode* ref) noexcept; //! Remove `node`. - ASMJIT_API HLNode* removeNode(HLNode* node); + ASMJIT_API HLNode* removeNode(HLNode* node) noexcept; //! Remove multiple nodes. - ASMJIT_API void removeNodes(HLNode* first, HLNode* last); + ASMJIT_API void removeNodes(HLNode* first, HLNode* last) noexcept; //! Get the first node. - ASMJIT_INLINE HLNode* getFirstNode() const { return _firstNode; } + ASMJIT_INLINE HLNode* getFirstNode() const noexcept { return _firstNode; } //! Get the last node. - ASMJIT_INLINE HLNode* getLastNode() const { return _lastNode; } + ASMJIT_INLINE HLNode* getLastNode() const noexcept { return _lastNode; } //! Get current node. //! //! \note If this method returns `nullptr` it means that nothing has been //! emitted yet. - ASMJIT_INLINE HLNode* getCursor() const { return _cursor; } + ASMJIT_INLINE HLNode* getCursor() const noexcept { return _cursor; } //! \internal //! //! Set the current node without returning the previous node. - ASMJIT_INLINE void _setCursor(HLNode* node) { _cursor = node; } + ASMJIT_INLINE void _setCursor(HLNode* node) noexcept { _cursor = node; } //! Set the current node to `node` and return the previous one. - ASMJIT_API HLNode* setCursor(HLNode* node); + ASMJIT_API HLNode* setCursor(HLNode* node) noexcept; // -------------------------------------------------------------------------- // [Func] // -------------------------------------------------------------------------- //! Get current function. - ASMJIT_INLINE HLFunc* getFunc() const { return _func; } + ASMJIT_INLINE HLFunc* getFunc() const noexcept { return _func; } // -------------------------------------------------------------------------- // [Align] @@ -258,7 +334,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { //! //! The sequence that is used to fill the gap between the aligned location //! and the current depends on `alignMode`, see \ref AlignMode. 
- ASMJIT_API Error align(uint32_t alignMode, uint32_t offset); + ASMJIT_API Error align(uint32_t alignMode, uint32_t offset) noexcept; // -------------------------------------------------------------------------- // [Label] @@ -267,126 +343,127 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { //! Get `HLLabel` by `id`. //! //! NOTE: The label has to be valid, see `isLabelValid()`. - ASMJIT_API HLLabel* getHLLabel(uint32_t id) const; + ASMJIT_API HLLabel* getHLLabel(uint32_t id) const noexcept; //! Get `HLLabel` by `label`. //! //! NOTE: The label has to be valid, see `isLabelValid()`. - ASMJIT_INLINE HLLabel* getHLLabel(const Label& label) { return getHLLabel(label.getId()); } + ASMJIT_INLINE HLLabel* getHLLabel(const Label& label) noexcept { + return getHLLabel(label.getId()); + } //! Get whether the label `id` is valid. - ASMJIT_API bool isLabelValid(uint32_t id) const; + ASMJIT_API bool isLabelValid(uint32_t id) const noexcept; //! Get whether the `label` is valid. - ASMJIT_INLINE bool isLabelValid(const Label& label) const { return isLabelValid(label.getId()); } + ASMJIT_INLINE bool isLabelValid(const Label& label) const noexcept { + return isLabelValid(label.getId()); + } //! \internal //! //! Create a new label and return its ID. - ASMJIT_API uint32_t _newLabelId(); + ASMJIT_API uint32_t _newLabelId() noexcept; //! Create and return a new `Label`. - ASMJIT_INLINE Label newLabel() { return Label(_newLabelId()); } + ASMJIT_INLINE Label newLabel() noexcept { return Label(_newLabelId()); } //! Bind label to the current offset. //! //! \note Label can be bound only once! - ASMJIT_API Error bind(const Label& label); + ASMJIT_API Error bind(const Label& label) noexcept; // -------------------------------------------------------------------------- // [Embed] // -------------------------------------------------------------------------- //! Embed data. 
- ASMJIT_API Error embed(const void* data, uint32_t size); + ASMJIT_API Error embed(const void* data, uint32_t size) noexcept; //! Embed a constant pool data, adding the following in order: //! 1. Data alignment. //! 2. Label. //! 3. Constant pool data. - ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool); + ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) noexcept; // -------------------------------------------------------------------------- // [Comment] // -------------------------------------------------------------------------- //! Emit a single comment line. - ASMJIT_API Error comment(const char* fmt, ...); + ASMJIT_API Error comment(const char* fmt, ...) noexcept; // -------------------------------------------------------------------------- // [Hint] // -------------------------------------------------------------------------- //! Emit a new hint (purery informational node). - ASMJIT_API Error _hint(Var& var, uint32_t hint, uint32_t value); + ASMJIT_API Error _hint(Var& var, uint32_t hint, uint32_t value) noexcept; // -------------------------------------------------------------------------- // [Vars] // -------------------------------------------------------------------------- //! Get whether variable `var` is created. - ASMJIT_INLINE bool isVarValid(const Var& var) const { - return static_cast(var.getId() & kOperandIdNum) < _varList.getLength(); + ASMJIT_INLINE bool isVarValid(const Var& var) const noexcept { + return static_cast(var.getId() & Operand::kIdIndexMask) < _varList.getLength(); } //! \internal //! //! Get `VarData` by `var`. - ASMJIT_INLINE VarData* getVd(const Var& var) const { + ASMJIT_INLINE VarData* getVd(const Var& var) const noexcept { return getVdById(var.getId()); } //! \internal //! //! Get `VarData` by `id`. 
- ASMJIT_INLINE VarData* getVdById(uint32_t id) const { + ASMJIT_INLINE VarData* getVdById(uint32_t id) const noexcept { ASMJIT_ASSERT(id != kInvalidValue); - ASMJIT_ASSERT(static_cast(id & kOperandIdNum) < _varList.getLength()); + ASMJIT_ASSERT(static_cast(id & Operand::kIdIndexMask) < _varList.getLength()); - return _varList[id & kOperandIdNum]; + return _varList[id & Operand::kIdIndexMask]; } //! \internal //! //! Get an array of 'VarData*'. - ASMJIT_INLINE VarData** _getVdArray() const { + ASMJIT_INLINE VarData** _getVdArray() const noexcept { return const_cast(_varList.getData()); } //! \internal //! //! Create a new `VarData`. - ASMJIT_API VarData* _newVd(uint32_t type, uint32_t size, uint32_t c, const char* name); - - //! Create a new `Var`. - virtual Error _newVar(Var* var, uint32_t vType, const char* name, va_list ap) = 0; + ASMJIT_API VarData* _newVd(const VarInfo& vi, const char* name) noexcept; //! Alloc variable `var`. - ASMJIT_API Error alloc(Var& var); + ASMJIT_API Error alloc(Var& var) noexcept; //! Alloc variable `var` using `regIndex` as a register index. - ASMJIT_API Error alloc(Var& var, uint32_t regIndex); + ASMJIT_API Error alloc(Var& var, uint32_t regIndex) noexcept; //! Alloc variable `var` using `reg` as a register operand. - ASMJIT_API Error alloc(Var& var, const Reg& reg); + ASMJIT_API Error alloc(Var& var, const Reg& reg) noexcept; //! Spill variable `var`. - ASMJIT_API Error spill(Var& var); + ASMJIT_API Error spill(Var& var) noexcept; //! Save variable `var` if the status is `modified` at this point. - ASMJIT_API Error save(Var& var); + ASMJIT_API Error save(Var& var) noexcept; //! Unuse variable `var`. - ASMJIT_API Error unuse(Var& var); + ASMJIT_API Error unuse(Var& var) noexcept; //! Get priority of variable `var`. - ASMJIT_API uint32_t getPriority(Var& var) const; + ASMJIT_API uint32_t getPriority(Var& var) const noexcept; //! Set priority of variable `var` to `priority`. 
- ASMJIT_API void setPriority(Var& var, uint32_t priority); + ASMJIT_API void setPriority(Var& var, uint32_t priority) noexcept; //! Get save-on-unuse `var` property. - ASMJIT_API bool getSaveOnUnuse(Var& var) const; + ASMJIT_API bool getSaveOnUnuse(Var& var) const noexcept; //! Set save-on-unuse `var` property to `value`. - ASMJIT_API void setSaveOnUnuse(Var& var, bool value); + ASMJIT_API void setSaveOnUnuse(Var& var, bool value) noexcept; //! Rename variable `var` to `name`. //! //! \note Only new name will appear in the logger. - ASMJIT_API void rename(Var& var, const char* fmt, ...); + ASMJIT_API void rename(Var& var, const char* fmt, ...) noexcept; // -------------------------------------------------------------------------- // [Stack] @@ -395,7 +472,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { //! \internal //! //! Create a new memory chunk allocated on the current function's stack. - virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) = 0; + virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept = 0; // -------------------------------------------------------------------------- // [Const] @@ -404,7 +481,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { //! \internal //! //! Put data to a constant-pool and get a memory reference to it. 
- virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) = 0; + virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept = 0; // -------------------------------------------------------------------------- // [Members] @@ -471,12 +548,7 @@ struct ASMJIT_VIRTAPI Compiler : public CodeGen { // [Defined-Later] // ============================================================================ -ASMJIT_INLINE Label::Label(Compiler& c) : Operand(NoInit) { - reset(); - _label.id = c._newLabelId(); -} - -ASMJIT_INLINE HLNode::HLNode(Compiler* compiler, uint32_t type) { +ASMJIT_INLINE HLNode::HLNode(Compiler* compiler, uint32_t type) noexcept { _prev = nullptr; _next = nullptr; _type = static_cast(type); diff --git a/src/asmjit/base/compilercontext.cpp b/src/asmjit/base/compilercontext.cpp index 72a4d90..c4915e9 100644 --- a/src/asmjit/base/compilercontext.cpp +++ b/src/asmjit/base/compilercontext.cpp @@ -298,7 +298,7 @@ Error Context::removeUnreachableCode() { node = first; do { HLNode* next = node->getNext(); - if (!node->isInformative() && node->getType() != kHLNodeTypeAlign) { + if (!node->isInformative() && node->getType() != HLNode::kTypeAlign) { ASMJIT_TLOG("[%05d] Unreachable\n", node->getFlowId()); compiler->removeNode(node); } @@ -394,7 +394,7 @@ _OnVisit: } } - if (node->getType() == kHLNodeTypeLabel) + if (node->getType() == HLNode::kTypeLabel) goto _OnTarget; if (node == func) @@ -413,7 +413,7 @@ _OnPatch: if (!bNode->_addBitsDelSource(bCur, bLen)) goto _OnDone; - if (node->getType() == kHLNodeTypeLabel) + if (node->getType() == HLNode::kTypeLabel) goto _OnTarget; if (node == func) @@ -574,15 +574,6 @@ Error Context::formatInlineComment(StringBuilder& dst, HLNode* node) { return kErrorOk; } -// ============================================================================ -// [asmjit::Context - Schedule] -// ============================================================================ - -Error 
Context::schedule() { - // By default there is no instruction scheduler implemented. - return kErrorOk; -} - // ============================================================================ // [asmjit::Context - Cleanup] // ============================================================================ @@ -626,9 +617,6 @@ Error Context::compile(HLFunc* func) { ASMJIT_PROPAGATE_ERROR(translate()); - if (compiler->hasFeature(kCompilerFeatureEnableScheduler)) - ASMJIT_PROPAGATE_ERROR(schedule()); - // We alter the compiler cursor, because it doesn't make sense to reference // it after compilation - some nodes may disappear and it's forbidden to add // new code after the compilation is done. diff --git a/src/asmjit/base/compilercontext_p.h b/src/asmjit/base/compilercontext_p.h index 8500b5e..4b51fcf 100644 --- a/src/asmjit/base/compilercontext_p.h +++ b/src/asmjit/base/compilercontext_p.h @@ -13,6 +13,7 @@ // [Dependencies - AsmJit] #include "../base/compiler.h" +#include "../base/podvector.h" #include "../base/zone.h" // [Api-Begin] @@ -23,22 +24,6 @@ namespace asmjit { //! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::VarFlags] -// ============================================================================ - -//! \internal -//! -//! X86/X64 variable flags. -ASMJIT_ENUM(VarFlags) { - //! Variable contains single-precision floating-point(s). - kVarFlagSp = 0x10, - //! Variable contains double-precision floating-point(s). - kVarFlagDp = 0x20, - //! Variable is packed, i.e. packed floats, doubles, ... - kVarFlagPacked = 0x40 -}; - // ============================================================================ // [asmjit::VarAttrFlags] // ============================================================================ @@ -801,12 +786,6 @@ struct Context { //! Translate code by allocating registers and handling state changes. 
virtual Error translate() = 0; - // -------------------------------------------------------------------------- - // [Schedule] - // -------------------------------------------------------------------------- - - virtual Error schedule(); - // -------------------------------------------------------------------------- // [Cleanup] // -------------------------------------------------------------------------- diff --git a/src/asmjit/base/compilerfunc.h b/src/asmjit/base/compilerfunc.h index f1f8f3c..5225783 100644 --- a/src/asmjit/base/compilerfunc.h +++ b/src/asmjit/base/compilerfunc.h @@ -37,28 +37,37 @@ namespace asmjit { //! Platform Independent Conventions //! -------------------------------- //! -//! - `kCallConvHost` - Should match the current C++ compiler native calling -//! convention. +//! - `kCallConvHost` - Should match the current C++ compiler native calling +//! convention. //! //! X86/X64 Specific Conventions //! ---------------------------- //! //! List of calling conventions for 32-bit x86 mode: -//! - `kCallConvX86CDecl` - Calling convention for C runtime. -//! - `kCallConvX86StdCall` - Calling convention for WinAPI functions. -//! - `kCallConvX86MsThisCall` - Calling convention for C++ members under -//! Windows (produced by MSVC and all MSVC compatible compilers). -//! - `kCallConvX86MsFastCall` - Fastest calling convention that can be used -//! by MSVC compiler. -//! - `kCallConvX86BorlandFastCall` - Borland fastcall convention. -//! - `kCallConvX86GccFastCall` - GCC fastcall convention (2 register arguments). -//! - `kCallConvX86GccRegParm1` - GCC regparm(1) convention. -//! - `kCallConvX86GccRegParm2` - GCC regparm(2) convention. -//! - `kCallConvX86GccRegParm3` - GCC regparm(3) convention. +//! - `kCallConvX86CDecl` - Calling convention for C runtime. +//! - `kCallConvX86StdCall` - Calling convention for WinAPI functions. +//! - `kCallConvX86MsThisCall` - Calling convention for C++ members under +//! 
Windows (produced by MSVC and all MSVC compatible compilers). +//! - `kCallConvX86MsFastCall` - Fastest calling convention that can be used +//! by MSVC compiler. +//! - `kCallConvX86BorlandFastCall` - Borland fastcall convention. +//! - `kCallConvX86GccFastCall` - GCC fastcall convention (2 register arguments). +//! - `kCallConvX86GccRegParm1` - GCC regparm(1) convention. +//! - `kCallConvX86GccRegParm2` - GCC regparm(2) convention. +//! - `kCallConvX86GccRegParm3` - GCC regparm(3) convention. //! //! List of calling conventions for 64-bit x86 mode (x64): -//! - `kCallConvX64Win` - Windows 64-bit calling convention (WIN64 ABI). -//! - `kCallConvX64Unix` - Unix 64-bit calling convention (AMD64 ABI). +//! - `kCallConvX64Win` - Windows 64-bit calling convention (WIN64 ABI). +//! - `kCallConvX64Unix` - Unix 64-bit calling convention (AMD64 ABI). +//! +//! ARM Specific Conventions +//! ------------------------ +//! +//! List of ARM calling conventions: +//! - `kCallConvArm32SoftFP` - Legacy calling convention, floating point +//! arguments are passed via GP registers. +//! - `kCallConvArm32HardFP` - Modern calling convention, uses VFP registers +//! to pass floating point arguments. ASMJIT_ENUM(CallConv) { //! Calling convention is invalid (can't be used). kCallConvNone = 0, @@ -289,6 +298,13 @@ ASMJIT_ENUM(CallConv) { //! Stack is always aligned to 16 bytes. kCallConvX64Unix = 11, + // -------------------------------------------------------------------------- + // [ARM] + // -------------------------------------------------------------------------- + + kCallConvArm32SoftFP = 16, + kCallConvArm32HardFP = 17, + // -------------------------------------------------------------------------- // [Internal] // -------------------------------------------------------------------------- @@ -303,6 +319,11 @@ ASMJIT_ENUM(CallConv) { //! \internal _kCallConvX64End = 11, + //! \internal + _kCallConvArmStart = 16, + //! 
\internal + _kCallConvArmEnd = 17, + // -------------------------------------------------------------------------- // [Host] // -------------------------------------------------------------------------- @@ -313,16 +334,16 @@ ASMJIT_ENUM(CallConv) { //! NOTE: This should be always the same as `kCallConvHostCDecl`, but some //! compilers allow to override the default calling convention. Overriding //! is not detected at the moment. - kCallConvHost = DETECTED_AT_COMPILE_TIME, + kCallConvHost = DETECTED_AT_COMPILE_TIME, //! Default C calling convention based on the current compiler's settings. - kCallConvHostCDecl = DETECTED_AT_COMPILE_TIME, + kCallConvHostCDecl = DETECTED_AT_COMPILE_TIME, //! Compatibility for `__stdcall` calling convention. //! //! NOTE: This enumeration is always set to a value which is compatible with //! the current compiler's `__stdcall` calling convention. In 64-bit mode //! there is no such convention and the value is mapped to `kCallConvX64Win` //! or `kCallConvX64Unix`, depending on the host architecture. - kCallConvHostStdCall = DETECTED_AT_COMPILE_TIME, + kCallConvHostStdCall = DETECTED_AT_COMPILE_TIME, //! Compatibility for `__fastcall` calling convention. //! //! NOTE: This enumeration is always set to a value which is compatible with @@ -347,6 +368,16 @@ ASMJIT_ENUM(CallConv) { kCallConvHostCDecl = kCallConvHost, kCallConvHostStdCall = kCallConvHost, kCallConvHostFastCall = kCallConvHost +#elif ASMJIT_ARCH_ARM32 +# if defined(__SOFTFP__) + kCallConvHost = kCallConvArm32SoftFP, +# else + kCallConvHost = kCallConvArm32HardFP, +# endif + // These don't exist on ARM. + kCallConvHostCDecl = kCallConvHost, + kCallConvHostStdCall = kCallConvHost, + kCallConvHostFastCall = kCallConvHost #else # error "[asmjit] Couldn't determine the target's calling convention." #endif @@ -484,6 +515,7 @@ ASMJIT_ENUM(FuncArgIndex) { //! This value is typically omitted and added only if there is HI argument //! accessed. kFuncArgLo = 0, + //! 
Index to the HI part of function argument. //! //! HI part of function argument depends on target architecture. On x86 it's diff --git a/src/asmjit/base/constpool.cpp b/src/asmjit/base/constpool.cpp index 47d9396..ee2372a 100644 --- a/src/asmjit/base/constpool.cpp +++ b/src/asmjit/base/constpool.cpp @@ -27,7 +27,7 @@ namespace asmjit { //! \internal //! //! Remove left horizontal links. -static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) { +static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* node) noexcept { ConstPool::Node* link = node->_link[0]; uint32_t level = node->_level; @@ -44,7 +44,7 @@ static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_skewNode(ConstPool::Node* no //! \internal //! //! Remove consecutive horizontal links. -static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) { +static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* node) noexcept { ConstPool::Node* link = node->_link[1]; uint32_t level = node->_level; @@ -59,7 +59,7 @@ static ASMJIT_INLINE ConstPool::Node* ConstPoolTree_splitNode(ConstPool::Node* n return node; } -ConstPool::Node* ConstPool::Tree::get(const void* data) { +ConstPool::Node* ConstPool::Tree::get(const void* data) noexcept { ConstPool::Node* node = _root; size_t dataSize = _dataSize; @@ -73,7 +73,7 @@ ConstPool::Node* ConstPool::Tree::get(const void* data) { return nullptr; } -void ConstPool::Tree::put(ConstPool::Node* newNode) { +void ConstPool::Tree::put(ConstPool::Node* newNode) noexcept { size_t dataSize = _dataSize; _length++; @@ -126,7 +126,7 @@ void ConstPool::Tree::put(ConstPool::Node* newNode) { // [asmjit::ConstPool - Construction / Destruction] // ============================================================================ -ConstPool::ConstPool(Zone* zone) { +ConstPool::ConstPool(Zone* zone) noexcept { _zone = zone; size_t dataSize = 1; @@ -141,13 +141,13 @@ ConstPool::ConstPool(Zone* zone) { 
_alignment = 0; } -ConstPool::~ConstPool() {} +ConstPool::~ConstPool() noexcept {} // ============================================================================ // [asmjit::ConstPool - Reset] // ============================================================================ -void ConstPool::reset() { +void ConstPool::reset() noexcept { for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) { _tree[i].reset(); _gaps[i] = nullptr; @@ -162,7 +162,7 @@ void ConstPool::reset() { // [asmjit::ConstPool - Ops] // ============================================================================ -static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) { +static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept { ConstPool::Gap* gap = self->_gapPool; if (gap == nullptr) return self->_zone->allocT(); @@ -171,12 +171,12 @@ static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) { return gap; } -static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) { +static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept { gap->_next = self->_gapPool; self->_gapPool = gap; } -static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) { +static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) noexcept { ASMJIT_ASSERT(length > 0); while (length > 0) { @@ -222,7 +222,7 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t length) { } } -Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { +Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept { size_t treeIndex; if (size == 32) @@ -329,11 +329,11 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) { // ============================================================================ struct ConstPoolFill { - ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t dataSize) : + ASMJIT_INLINE ConstPoolFill(uint8_t* dst, size_t 
dataSize) noexcept : _dst(dst), _dataSize(dataSize) {} - ASMJIT_INLINE void visit(const ConstPool::Node* node) { + ASMJIT_INLINE void visit(const ConstPool::Node* node) noexcept { if (!node->_shared) ::memcpy(_dst + node->_offset, node->getData(), _dataSize); } @@ -342,7 +342,7 @@ struct ConstPoolFill { size_t _dataSize; }; -void ConstPool::fill(void* dst) const { +void ConstPool::fill(void* dst) const noexcept { // Clears possible gaps, asmjit should never emit garbage to the output. ::memset(dst, 0, _size); diff --git a/src/asmjit/base/constpool.h b/src/asmjit/base/constpool.h index 981f0e8..1497eb6 100644 --- a/src/asmjit/base/constpool.h +++ b/src/asmjit/base/constpool.h @@ -65,7 +65,7 @@ struct ConstPool { // [Accessors] // -------------------------------------------------------------------------- - ASMJIT_INLINE void* getData() const { + ASMJIT_INLINE void* getData() const noexcept { return static_cast(const_cast(this) + 1); } @@ -100,7 +100,7 @@ struct ConstPool { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE Tree(size_t dataSize = 0) + ASMJIT_INLINE Tree(size_t dataSize = 0) noexcept : _root(nullptr), _length(0), _dataSize(dataSize) {} @@ -110,7 +110,7 @@ struct ConstPool { // [Reset] // -------------------------------------------------------------------------- - ASMJIT_INLINE void reset() { + ASMJIT_INLINE void reset() noexcept { _root = nullptr; _length = 0; } @@ -119,10 +119,10 @@ struct ConstPool { // [Accessors] // -------------------------------------------------------------------------- - ASMJIT_INLINE bool isEmpty() const { return _length == 0; } - ASMJIT_INLINE size_t getLength() const { return _length; } + ASMJIT_INLINE bool isEmpty() const noexcept { return _length == 0; } + ASMJIT_INLINE size_t getLength() const noexcept { return _length; } - ASMJIT_INLINE void setDataSize(size_t dataSize) { + ASMJIT_INLINE void setDataSize(size_t dataSize) noexcept { 
ASMJIT_ASSERT(isEmpty()); _dataSize = dataSize; } @@ -131,15 +131,15 @@ struct ConstPool { // [Ops] // -------------------------------------------------------------------------- - ASMJIT_API Node* get(const void* data); - ASMJIT_API void put(Node* node); + ASMJIT_API Node* get(const void* data) noexcept; + ASMJIT_API void put(Node* node) noexcept; // -------------------------------------------------------------------------- // [Iterate] // -------------------------------------------------------------------------- template - ASMJIT_INLINE void iterate(Visitor& visitor) const { + ASMJIT_INLINE void iterate(Visitor& visitor) const noexcept { Node* node = const_cast(_root); Node* link; @@ -182,7 +182,7 @@ struct ConstPool { // [Helpers] // -------------------------------------------------------------------------- - static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) { + static ASMJIT_INLINE Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept { Node* node = zone->allocT(sizeof(Node) + size); if (node == nullptr) return nullptr; @@ -213,25 +213,25 @@ struct ConstPool { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_API ConstPool(Zone* zone); - ASMJIT_API ~ConstPool(); + ASMJIT_API ConstPool(Zone* zone) noexcept; + ASMJIT_API ~ConstPool() noexcept; // -------------------------------------------------------------------------- // [Reset] // -------------------------------------------------------------------------- - ASMJIT_API void reset(); + ASMJIT_API void reset() noexcept; // -------------------------------------------------------------------------- // [Ops] // -------------------------------------------------------------------------- //! Get whether the constant-pool is empty. 
- ASMJIT_INLINE bool isEmpty() const { return _size == 0; } + ASMJIT_INLINE bool isEmpty() const noexcept { return _size == 0; } //! Get the size of the constant-pool in bytes. - ASMJIT_INLINE size_t getSize() const { return _size; } + ASMJIT_INLINE size_t getSize() const noexcept { return _size; } //! Get minimum alignment. - ASMJIT_INLINE size_t getAlignment() const { return _alignment; } + ASMJIT_INLINE size_t getAlignment() const noexcept { return _alignment; } //! Add a constant to the constant pool. //! @@ -250,14 +250,14 @@ struct ConstPool { //! been already added. For example if you try to add 4-byte constant and then //! 8-byte constant having the same 4-byte pattern as the previous one, two //! independent slots will be generated by the pool. - ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset); + ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept; // -------------------------------------------------------------------------- // [Fill] // -------------------------------------------------------------------------- //! Fill the destination with the constants from the pool. 
- ASMJIT_API void fill(void* dst) const; + ASMJIT_API void fill(void* dst) const noexcept; // -------------------------------------------------------------------------- // [Members] diff --git a/src/asmjit/base/containers.cpp b/src/asmjit/base/containers.cpp index 709558d..b93559f 100644 --- a/src/asmjit/base/containers.cpp +++ b/src/asmjit/base/containers.cpp @@ -17,113 +17,19 @@ namespace asmjit { // ============================================================================ -// [asmjit::PodVectorBase - NullData] +// [asmjit::StringBuilder - Construction / Destruction] // ============================================================================ -const PodVectorData PodVectorBase::_nullData = { 0, 0 }; - -// ============================================================================ -// [asmjit::PodVectorBase - Reset] -// ============================================================================ - -//! Clear vector data and free internal buffer. -void PodVectorBase::reset(bool releaseMemory) { - PodVectorData* d = _d; - - if (d == &_nullData) - return; - - if (releaseMemory) { - ASMJIT_FREE(d); - _d = const_cast(&_nullData); - return; - } - - d->length = 0; -} - -// ============================================================================ -// [asmjit::PodVectorBase - Helpers] -// ============================================================================ - -Error PodVectorBase::_grow(size_t n, size_t sizeOfT) { - PodVectorData* d = _d; - - size_t threshold = kMemAllocGrowMax / sizeOfT; - size_t capacity = d->capacity; - size_t after = d->length; - - if (IntTraits::maxValue() - n < after) - return kErrorNoHeapMemory; - - after += n; - - if (capacity >= after) - return kErrorOk; - - // PodVector is used as a linear array for some data structures used by - // AsmJit code generation. 
The purpose of this agressive growing schema - // is to minimize memory reallocations, because AsmJit code generation - // classes live short life and will be freed or reused soon. - if (capacity < 32) - capacity = 32; - else if (capacity < 128) - capacity = 128; - else if (capacity < 512) - capacity = 512; - - while (capacity < after) { - if (capacity < threshold) - capacity *= 2; - else - capacity += threshold; - } - - return _reserve(capacity, sizeOfT); -} - -Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) { - PodVectorData* d = _d; - - if (d->capacity >= n) - return kErrorOk; - - size_t nBytes = sizeof(PodVectorData) + n * sizeOfT; - if (nBytes < n) - return kErrorNoHeapMemory; - - if (d == &_nullData) { - d = static_cast(ASMJIT_ALLOC(nBytes)); - if (d == nullptr) - return kErrorNoHeapMemory; - d->length = 0; - } - else { - d = static_cast(ASMJIT_REALLOC(d, nBytes)); - if (d == nullptr) - return kErrorNoHeapMemory; - } - - d->capacity = n; - _d = d; - - return kErrorOk; -} - // Should be placed in read-only memory. 
static const char StringBuilder_empty[4] = { 0 }; -// ============================================================================ -// [asmjit::StringBuilder - Construction / Destruction] -// ============================================================================ - -StringBuilder::StringBuilder() +StringBuilder::StringBuilder() noexcept : _data(const_cast(StringBuilder_empty)), _length(0), _capacity(0), _canFree(false) {} -StringBuilder::~StringBuilder() { +StringBuilder::~StringBuilder() noexcept { if (_canFree) ASMJIT_FREE(_data); } @@ -132,7 +38,7 @@ StringBuilder::~StringBuilder() { // [asmjit::StringBuilder - Prepare / Reserve] // ============================================================================ -char* StringBuilder::prepare(uint32_t op, size_t len) { +char* StringBuilder::prepare(uint32_t op, size_t len) noexcept { // -------------------------------------------------------------------------- // [Set] // -------------------------------------------------------------------------- @@ -231,7 +137,7 @@ char* StringBuilder::prepare(uint32_t op, size_t len) { } } -bool StringBuilder::reserve(size_t to) { +bool StringBuilder::reserve(size_t to) noexcept { if (_capacity >= to) return true; @@ -258,7 +164,7 @@ bool StringBuilder::reserve(size_t to) { // [asmjit::StringBuilder - Clear] // ============================================================================ -void StringBuilder::clear() { +void StringBuilder::clear() noexcept { if (_data != StringBuilder_empty) _data[0] = 0; _length = 0; @@ -268,7 +174,7 @@ void StringBuilder::clear() { // [asmjit::StringBuilder - Methods] // ============================================================================ -bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) { +bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) noexcept { if (len == kInvalidIndex) len = str != nullptr ? 
::strlen(str) : static_cast(0); @@ -280,7 +186,7 @@ bool StringBuilder::_opString(uint32_t op, const char* str, size_t len) { return true; } -bool StringBuilder::_opChar(uint32_t op, char c) { +bool StringBuilder::_opChar(uint32_t op, char c) noexcept { char* p = prepare(op, 1); if (p == nullptr) return false; @@ -289,7 +195,7 @@ bool StringBuilder::_opChar(uint32_t op, char c) { return true; } -bool StringBuilder::_opChars(uint32_t op, char c, size_t len) { +bool StringBuilder::_opChars(uint32_t op, char c, size_t len) noexcept { char* p = prepare(op, len); if (p == nullptr) return false; @@ -300,7 +206,7 @@ bool StringBuilder::_opChars(uint32_t op, char c, size_t len) { static const char StringBuilder_numbers[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; -bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) { +bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept { if (base < 2 || base > 36) base = 10; @@ -389,7 +295,7 @@ bool StringBuilder::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t wid return true; } -bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) { +bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) noexcept { if (len >= IntTraits::maxValue() / 2) return false; @@ -407,7 +313,7 @@ bool StringBuilder::_opHex(uint32_t op, const void* data, size_t len) { return true; } -bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) { +bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept { char buf[1024]; vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap); @@ -416,7 +322,7 @@ bool StringBuilder::_opVFormat(uint32_t op, const char* fmt, va_list ap) { return _opString(op, buf); } -bool StringBuilder::setFormat(const char* fmt, ...) { +bool StringBuilder::setFormat(const char* fmt, ...) 
noexcept { bool result; va_list ap; @@ -427,7 +333,7 @@ bool StringBuilder::setFormat(const char* fmt, ...) { return result; } -bool StringBuilder::appendFormat(const char* fmt, ...) { +bool StringBuilder::appendFormat(const char* fmt, ...) noexcept { bool result; va_list ap; @@ -438,7 +344,7 @@ bool StringBuilder::appendFormat(const char* fmt, ...) { return result; } -bool StringBuilder::eq(const char* str, size_t len) const { +bool StringBuilder::eq(const char* str, size_t len) const noexcept { const char* aData = _data; const char* bData = str; diff --git a/src/asmjit/base/containers.h b/src/asmjit/base/containers.h index 1f073d7..62157e1 100644 --- a/src/asmjit/base/containers.h +++ b/src/asmjit/base/containers.h @@ -40,15 +40,15 @@ struct BitArray { // [Accessors] // -------------------------------------------------------------------------- - ASMJIT_INLINE uintptr_t getBit(uint32_t index) const { + ASMJIT_INLINE uintptr_t getBit(uint32_t index) const noexcept { return (data[index / kEntityBits] >> (index % kEntityBits)) & 1; } - ASMJIT_INLINE void setBit(uint32_t index) { + ASMJIT_INLINE void setBit(uint32_t index) noexcept { data[index / kEntityBits] |= static_cast(1) << (index % kEntityBits); } - ASMJIT_INLINE void delBit(uint32_t index) { + ASMJIT_INLINE void delBit(uint32_t index) noexcept { data[index / kEntityBits] &= ~(static_cast(1) << (index % kEntityBits)); } @@ -57,7 +57,7 @@ struct BitArray { // -------------------------------------------------------------------------- //! Copy bits from `s0`, returns `true` if at least one bit is set in `s0`. 
- ASMJIT_INLINE bool copyBits(const BitArray* s0, uint32_t len) { + ASMJIT_INLINE bool copyBits(const BitArray* s0, uint32_t len) noexcept { uintptr_t r = 0; for (uint32_t i = 0; i < len; i++) { uintptr_t t = s0->data[i]; @@ -67,11 +67,11 @@ struct BitArray { return r != 0; } - ASMJIT_INLINE bool addBits(const BitArray* s0, uint32_t len) { + ASMJIT_INLINE bool addBits(const BitArray* s0, uint32_t len) noexcept { return addBits(this, s0, len); } - ASMJIT_INLINE bool addBits(const BitArray* s0, const BitArray* s1, uint32_t len) { + ASMJIT_INLINE bool addBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept { uintptr_t r = 0; for (uint32_t i = 0; i < len; i++) { uintptr_t t = s0->data[i] | s1->data[i]; @@ -81,11 +81,11 @@ struct BitArray { return r != 0; } - ASMJIT_INLINE bool andBits(const BitArray* s1, uint32_t len) { + ASMJIT_INLINE bool andBits(const BitArray* s1, uint32_t len) noexcept { return andBits(this, s1, len); } - ASMJIT_INLINE bool andBits(const BitArray* s0, const BitArray* s1, uint32_t len) { + ASMJIT_INLINE bool andBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept { uintptr_t r = 0; for (uint32_t i = 0; i < len; i++) { uintptr_t t = s0->data[i] & s1->data[i]; @@ -95,11 +95,11 @@ struct BitArray { return r != 0; } - ASMJIT_INLINE bool delBits(const BitArray* s1, uint32_t len) { + ASMJIT_INLINE bool delBits(const BitArray* s1, uint32_t len) noexcept { return delBits(this, s1, len); } - ASMJIT_INLINE bool delBits(const BitArray* s0, const BitArray* s1, uint32_t len) { + ASMJIT_INLINE bool delBits(const BitArray* s0, const BitArray* s1, uint32_t len) noexcept { uintptr_t r = 0; for (uint32_t i = 0; i < len; i++) { uintptr_t t = s0->data[i] & ~s1->data[i]; @@ -109,11 +109,11 @@ struct BitArray { return r != 0; } - ASMJIT_INLINE bool _addBitsDelSource(BitArray* s1, uint32_t len) { + ASMJIT_INLINE bool _addBitsDelSource(BitArray* s1, uint32_t len) noexcept { return _addBitsDelSource(this, s1, len); } - ASMJIT_INLINE bool 
_addBitsDelSource(const BitArray* s0, BitArray* s1, uint32_t len) { + ASMJIT_INLINE bool _addBitsDelSource(const BitArray* s0, BitArray* s1, uint32_t len) noexcept { uintptr_t r = 0; for (uint32_t i = 0; i < len; i++) { uintptr_t a = s0->data[i]; @@ -135,217 +135,6 @@ struct BitArray { uintptr_t data[1]; }; -// ============================================================================ -// [asmjit::PodVectorData] -// ============================================================================ - -//! \internal -struct PodVectorData { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get data. - ASMJIT_INLINE void* getData() const { return (void*)(this + 1); } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Capacity of the vector. - size_t capacity; - //! Length of the vector. - size_t length; -}; - -// ============================================================================ -// [asmjit::PodVectorBase] -// ============================================================================ - -//! \internal -struct PodVectorBase { - static ASMJIT_API const PodVectorData _nullData; - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new instance of `PodVectorBase`. - ASMJIT_INLINE PodVectorBase() : _d(const_cast(&_nullData)) {} - //! Destroy the `PodVectorBase` and data. - ASMJIT_INLINE ~PodVectorBase() { reset(true); } - - // -------------------------------------------------------------------------- - // [Reset] - // -------------------------------------------------------------------------- - - //! 
Reset the vector data and set its `length` to zero. - //! - //! If `releaseMemory` is true the vector buffer will be released to the - //! system. - ASMJIT_API void reset(bool releaseMemory = false); - - // -------------------------------------------------------------------------- - // [Grow / Reserve] - // -------------------------------------------------------------------------- - -protected: - ASMJIT_API Error _grow(size_t n, size_t sizeOfT); - ASMJIT_API Error _reserve(size_t n, size_t sizeOfT); - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - -public: - PodVectorData* _d; -}; - -// ============================================================================ -// [asmjit::PodVector] -// ============================================================================ - -//! Template used to store and manage array of POD data. -//! -//! This template has these adventages over other vector<> templates: -//! - Non-copyable (designed to be non-copyable, we want it) -//! - No copy-on-write (some implementations of stl can use it) -//! - Optimized for working only with POD types -//! - Uses ASMJIT_... memory management macros -template -struct PodVector : PodVectorBase { - ASMJIT_NO_COPY(PodVector) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - //! Create a new instance of `PodVector`. - ASMJIT_INLINE PodVector() {} - //! Destroy the `PodVector<>` and data. - ASMJIT_INLINE ~PodVector() {} - - // -------------------------------------------------------------------------- - // [Data] - // -------------------------------------------------------------------------- - - //! Get whether the vector is empty. - ASMJIT_INLINE bool isEmpty() const { return _d->length == 0; } - //! Get length. 
- ASMJIT_INLINE size_t getLength() const { return _d->length; } - //! Get capacity. - ASMJIT_INLINE size_t getCapacity() const { return _d->capacity; } - //! Get data. - ASMJIT_INLINE T* getData() { return static_cast(_d->getData()); } - //! \overload - ASMJIT_INLINE const T* getData() const { return static_cast(_d->getData()); } - - // -------------------------------------------------------------------------- - // [Grow / Reserve] - // -------------------------------------------------------------------------- - - //! Called to grow the buffer to fit at least `n` elements more. - ASMJIT_INLINE Error _grow(size_t n) { return PodVectorBase::_grow(n, sizeof(T)); } - //! Realloc internal array to fit at least `n` items. - ASMJIT_INLINE Error _reserve(size_t n) { return PodVectorBase::_reserve(n, sizeof(T)); } - - // -------------------------------------------------------------------------- - // [Ops] - // -------------------------------------------------------------------------- - - //! Prepend `item` to vector. - Error prepend(const T& item) { - PodVectorData* d = _d; - - if (d->length == d->capacity) { - ASMJIT_PROPAGATE_ERROR(_grow(1)); - _d = d; - } - - ::memmove(static_cast(d->getData()) + 1, d->getData(), d->length * sizeof(T)); - ::memcpy(d->getData(), &item, sizeof(T)); - - d->length++; - return kErrorOk; - } - - //! Insert an `item` at the `index`. - Error insert(size_t index, const T& item) { - PodVectorData* d = _d; - ASMJIT_ASSERT(index <= d->length); - - if (d->length == d->capacity) { - ASMJIT_PROPAGATE_ERROR(_grow(1)); - d = _d; - } - - T* dst = static_cast(d->getData()) + index; - ::memmove(dst + 1, dst, d->length - index); - ::memcpy(dst, &item, sizeof(T)); - - d->length++; - return kErrorOk; - } - - //! Append `item` to vector. 
- Error append(const T& item) { - PodVectorData* d = _d; - - if (d->length == d->capacity) { - ASMJIT_PROPAGATE_ERROR(_grow(1)); - d = _d; - } - - ::memcpy(static_cast(d->getData()) + d->length, &item, sizeof(T)); - - d->length++; - return kErrorOk; - } - - //! Get index of `val` or `kInvalidIndex` if not found. - size_t indexOf(const T& val) const { - PodVectorData* d = _d; - - const T* data = static_cast(d->getData()); - size_t len = d->length; - - for (size_t i = 0; i < len; i++) - if (data[i] == val) - return i; - - return kInvalidIndex; - } - - //! Remove item at index `i`. - void removeAt(size_t i) { - PodVectorData* d = _d; - ASMJIT_ASSERT(i < d->length); - - T* data = static_cast(d->getData()) + i; - d->length--; - ::memmove(data, data + 1, d->length - i); - } - - //! Swap this pod-vector with `other`. - void swap(PodVector& other) { - T* otherData = other._d; - other._d = _d; - _d = otherData; - } - - //! Get item at index `i`. - ASMJIT_INLINE T& operator[](size_t i) { - ASMJIT_ASSERT(i < getLength()); - return getData()[i]; - } - - //! Get item at index `i`. - ASMJIT_INLINE const T& operator[](size_t i) const { - ASMJIT_ASSERT(i < getLength()); - return getData()[i]; - } -}; - // ============================================================================ // [asmjit::PodList] // ============================================================================ @@ -365,12 +154,12 @@ struct PodList { // -------------------------------------------------------------------------- //! Get next node. - ASMJIT_INLINE Link* getNext() const { return _next; } + ASMJIT_INLINE Link* getNext() const noexcept { return _next; } //! Get value. - ASMJIT_INLINE T getValue() const { return _value; } + ASMJIT_INLINE T getValue() const noexcept { return _value; } //! Set value to `value`. 
- ASMJIT_INLINE void setValue(const T& value) { _value = value; } + ASMJIT_INLINE void setValue(const T& value) noexcept { _value = value; } // -------------------------------------------------------------------------- // [Members] @@ -384,35 +173,35 @@ struct PodList { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE PodList() : _first(nullptr), _last(nullptr) {} - ASMJIT_INLINE ~PodList() {} + ASMJIT_INLINE PodList() noexcept : _first(nullptr), _last(nullptr) {} + ASMJIT_INLINE ~PodList() noexcept {} // -------------------------------------------------------------------------- // [Data] // -------------------------------------------------------------------------- - ASMJIT_INLINE bool isEmpty() const { return _first != nullptr; } + ASMJIT_INLINE bool isEmpty() const noexcept { return _first == nullptr; } - ASMJIT_INLINE Link* getFirst() const { return _first; } - ASMJIT_INLINE Link* getLast() const { return _last; } + ASMJIT_INLINE Link* getFirst() const noexcept { return _first; } + ASMJIT_INLINE Link* getLast() const noexcept { return _last; } // -------------------------------------------------------------------------- // [Ops] // -------------------------------------------------------------------------- - ASMJIT_INLINE void reset() { + ASMJIT_INLINE void reset() noexcept { _first = nullptr; _last = nullptr; } - ASMJIT_INLINE void prepend(Link* link) { + ASMJIT_INLINE void prepend(Link* link) noexcept { link->_next = _first; if (_first == nullptr) _last = link; _first = link; } - ASMJIT_INLINE void append(Link* link) { + ASMJIT_INLINE void append(Link* link) noexcept { link->_next = nullptr; if (_first == nullptr) _first = link; @@ -472,92 +261,92 @@ struct StringBuilder { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_API StringBuilder(); - ASMJIT_API ~StringBuilder(); + ASMJIT_API StringBuilder() noexcept; + 
ASMJIT_API ~StringBuilder() noexcept; - ASMJIT_INLINE StringBuilder(const _NoInit&) {} + ASMJIT_INLINE StringBuilder(const _NoInit&) noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get string builder capacity. - ASMJIT_INLINE size_t getCapacity() const { return _capacity; } + ASMJIT_INLINE size_t getCapacity() const noexcept { return _capacity; } //! Get length. - ASMJIT_INLINE size_t getLength() const { return _length; } + ASMJIT_INLINE size_t getLength() const noexcept { return _length; } //! Get null-terminated string data. - ASMJIT_INLINE char* getData() { return _data; } + ASMJIT_INLINE char* getData() noexcept { return _data; } //! Get null-terminated string data (const). - ASMJIT_INLINE const char* getData() const { return _data; } + ASMJIT_INLINE const char* getData() const noexcept { return _data; } // -------------------------------------------------------------------------- // [Prepare / Reserve] // -------------------------------------------------------------------------- //! Prepare to set/append. - ASMJIT_API char* prepare(uint32_t op, size_t len); + ASMJIT_API char* prepare(uint32_t op, size_t len) noexcept; //! Reserve `to` bytes in string builder. - ASMJIT_API bool reserve(size_t to); + ASMJIT_API bool reserve(size_t to) noexcept; // -------------------------------------------------------------------------- // [Clear] // -------------------------------------------------------------------------- //! Clear the content in String builder. 
- ASMJIT_API void clear(); + ASMJIT_API void clear() noexcept; // -------------------------------------------------------------------------- // [Op] // -------------------------------------------------------------------------- - ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex); - ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap); - ASMJIT_API bool _opChar(uint32_t op, char c); - ASMJIT_API bool _opChars(uint32_t op, char c, size_t len); - ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0); - ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len); + ASMJIT_API bool _opString(uint32_t op, const char* str, size_t len = kInvalidIndex) noexcept; + ASMJIT_API bool _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept; + ASMJIT_API bool _opChar(uint32_t op, char c) noexcept; + ASMJIT_API bool _opChars(uint32_t op, char c, size_t len) noexcept; + ASMJIT_API bool _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept; + ASMJIT_API bool _opHex(uint32_t op, const void* data, size_t len) noexcept; // -------------------------------------------------------------------------- // [Set] // -------------------------------------------------------------------------- //! Replace the current content by `str` of `len`. - ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) { + ASMJIT_INLINE bool setString(const char* str, size_t len = kInvalidIndex) noexcept { return _opString(kStringOpSet, str, len); } //! Replace the current content by formatted string `fmt`. - ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) { + ASMJIT_INLINE bool setVFormat(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpSet, fmt, ap); } //! Replace the current content by formatted string `fmt`. - ASMJIT_API bool setFormat(const char* fmt, ...); + ASMJIT_API bool setFormat(const char* fmt, ...) 
noexcept; //! Replace the current content by `c` character. - ASMJIT_INLINE bool setChar(char c) { + ASMJIT_INLINE bool setChar(char c) noexcept { return _opChar(kStringOpSet, c); } //! Replace the current content by `c` of `len`. - ASMJIT_INLINE bool setChars(char c, size_t len) { + ASMJIT_INLINE bool setChars(char c, size_t len) noexcept { return _opChars(kStringOpSet, c, len); } //! Replace the current content by formatted integer `i`. - ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + ASMJIT_INLINE bool setInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { return _opNumber(kStringOpSet, i, base, width, flags | kStringFormatSigned); } //! Replace the current content by formatted integer `i`. - ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + ASMJIT_INLINE bool setUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { return _opNumber(kStringOpSet, i, base, width, flags); } //! Replace the current content by the given `data` converted to a HEX string. - ASMJIT_INLINE bool setHex(const void* data, size_t len) { + ASMJIT_INLINE bool setHex(const void* data, size_t len) noexcept { return _opHex(kStringOpSet, data, len); } @@ -566,40 +355,40 @@ struct StringBuilder { // -------------------------------------------------------------------------- //! Append `str` of `len`. - ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) { + ASMJIT_INLINE bool appendString(const char* str, size_t len = kInvalidIndex) noexcept { return _opString(kStringOpAppend, str, len); } //! Append a formatted string `fmt` to the current content. - ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) { + ASMJIT_INLINE bool appendVFormat(const char* fmt, va_list ap) noexcept { return _opVFormat(kStringOpAppend, fmt, ap); } //! Append a formatted string `fmt` to the current content. 
- ASMJIT_API bool appendFormat(const char* fmt, ...); + ASMJIT_API bool appendFormat(const char* fmt, ...) noexcept; //! Append `c` character. - ASMJIT_INLINE bool appendChar(char c) { + ASMJIT_INLINE bool appendChar(char c) noexcept { return _opChar(kStringOpAppend, c); } //! Append `c` of `len`. - ASMJIT_INLINE bool appendChars(char c, size_t len) { + ASMJIT_INLINE bool appendChars(char c, size_t len) noexcept { return _opChars(kStringOpAppend, c, len); } //! Append `i`. - ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + ASMJIT_INLINE bool appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { return _opNumber(kStringOpAppend, static_cast(i), base, width, flags | kStringFormatSigned); } //! Append `i`. - ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) { + ASMJIT_INLINE bool appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept { return _opNumber(kStringOpAppend, i, base, width, flags); } //! Append the given `data` converted to a HEX string. - ASMJIT_INLINE bool appendHex(const void* data, size_t len) { + ASMJIT_INLINE bool appendHex(const void* data, size_t len) noexcept { return _opHex(kStringOpAppend, data, len); } @@ -608,7 +397,7 @@ struct StringBuilder { // -------------------------------------------------------------------------- //! Append `str` of `len`, inlined, without buffer overflow check. - ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) { + ASMJIT_INLINE void _appendString(const char* str, size_t len = kInvalidIndex) noexcept { // len should be a constant if we are inlining. if (len == kInvalidIndex) { char* p = &_data[_length]; @@ -636,7 +425,7 @@ struct StringBuilder { } //! Append `c` character, inlined, without buffer overflow check. 
- ASMJIT_INLINE void _appendChar(char c) { + ASMJIT_INLINE void _appendChar(char c) noexcept { ASMJIT_ASSERT(_capacity - _length >= 1); _data[_length] = c; @@ -645,7 +434,7 @@ struct StringBuilder { } //! Append `c` of `len`, inlined, without buffer overflow check. - ASMJIT_INLINE void _appendChars(char c, size_t len) { + ASMJIT_INLINE void _appendChars(char c, size_t len) noexcept { ASMJIT_ASSERT(_capacity - _length >= len); char* p = &_data[_length]; @@ -658,7 +447,7 @@ struct StringBuilder { _length += len; } - ASMJIT_INLINE void _appendUInt32(uint32_t i) { + ASMJIT_INLINE void _appendUInt32(uint32_t i) noexcept { char buf_[32]; char* pEnd = buf_ + ASMJIT_ARRAY_SIZE(buf_); @@ -688,19 +477,19 @@ struct StringBuilder { // -------------------------------------------------------------------------- //! Check for equality with other `str` of `len`. - ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const; + ASMJIT_API bool eq(const char* str, size_t len = kInvalidIndex) const noexcept; //! Check for equality with `other`. 
- ASMJIT_INLINE bool eq(const StringBuilder& other) const { return eq(other._data); } + ASMJIT_INLINE bool eq(const StringBuilder& other) const noexcept { return eq(other._data); } // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE bool operator==(const StringBuilder& other) const { return eq(other); } - ASMJIT_INLINE bool operator!=(const StringBuilder& other) const { return !eq(other); } + ASMJIT_INLINE bool operator==(const StringBuilder& other) const noexcept { return eq(other); } + ASMJIT_INLINE bool operator!=(const StringBuilder& other) const noexcept { return !eq(other); } - ASMJIT_INLINE bool operator==(const char* str) const { return eq(str); } - ASMJIT_INLINE bool operator!=(const char* str) const { return !eq(str); } + ASMJIT_INLINE bool operator==(const char* str) const noexcept { return eq(str); } + ASMJIT_INLINE bool operator!=(const char* str) const noexcept { return !eq(str); } // -------------------------------------------------------------------------- // [Members] @@ -729,7 +518,7 @@ struct StringBuilderTmp : public StringBuilder { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE StringBuilderTmp() : StringBuilder(NoInit) { + ASMJIT_INLINE StringBuilderTmp() noexcept : StringBuilder(NoInit) { _data = _embeddedData; _data[0] = 0; diff --git a/src/asmjit/base/cpuinfo.cpp b/src/asmjit/base/cpuinfo.cpp index 8e0760d..dd61e12 100644 --- a/src/asmjit/base/cpuinfo.cpp +++ b/src/asmjit/base/cpuinfo.cpp @@ -9,14 +9,8 @@ // [Dependencies - AsmJit] #include "../base/cpuinfo.h" +#include "../base/utils.h" -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 -#include "../x86/x86cpuinfo.h" -#else -// ? 
-#endif - -// [Dependencies - Posix] #if ASMJIT_OS_POSIX # include # include @@ -24,53 +18,618 @@ # include #endif // ASMJIT_OS_POSIX +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 +# if ASMJIT_CC_MSC_GE(14, 0, 0) + # include // Required by `__cpuid()` and `_xgetbv()`. +# endif // _MSC_VER >= 1400 +#endif + +#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 +# if ASMJIT_OS_LINUX +# include // Required by `getauxval()`. +# endif +#endif + // [Api-Begin] #include "../apibegin.h" namespace asmjit { // ============================================================================ -// [asmjit::CpuInfo - DetectHwThreadsCount] +// [asmjit::CpuInfo - Detect ARM & ARM64] // ============================================================================ -uint32_t CpuInfo::detectHwThreadsCount() { +// ARM information has to be retrieved by the OS (this is how ARM was designed). +#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 + +#if ASMJIT_ARCH_ARM64 +static void armPopulateBaseline64Features(CpuInfo* cpuInfo) noexcept { + // Thumb (including all variations) is only supported on ARM32. + + // ARM64 is based on ARMv8 and newer. + cpuInfo->addFeature(CpuInfo::kArmFeatureV6); + cpuInfo->addFeature(CpuInfo::kArmFeatureV7); + cpuInfo->addFeature(CpuInfo::kArmFeatureV8); + + // ARM64 comes with these features by default. + cpuInfo->addFeature(CpuInfo::kArmFeatureDSP); + cpuInfo->addFeature(CpuInfo::kArmFeatureIDIV); + cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2); + cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3); + cpuInfo->addFeature(CpuInfo::kArmFeatureVFP4); +} +#endif // ASMJIT_ARCH_ARM64 + +#if ASMJIT_OS_WINDOWS +//! \internal +//! +//! Detect ARM CPU features on Windows. +//! +//! The detection is based on `IsProcessorFeaturePresent()` API call. +static void armDetectCpuInfoOnWindows(CpuInfo* cpuInfo) noexcept { +#if ASMJIT_ARCH_ARM32 + cpuInfo->setArch(kArchArm32); + + // Windows for ARM requires at least ARMv7 with DSP extensions. 
+ cpuInfo->addFeature(CpuInfo::kArmFeatureV6); + cpuInfo->addFeature(CpuInfo::kArmFeatureV7); + cpuInfo->addFeature(CpuInfo::kArmFeatureDSP); + + // Windows for ARM requires VFP3. + cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2); + cpuInfo->addFeature(CpuInfo::kArmFeatureVFP3); + + // Windows for ARM requires and uses THUMB2. + cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB); + cpuInfo->addFeature(CpuInfo::kArmFeatureTHUMB2); +#else + cpuInfo->setArch(kArchArm64); + armPopulateBaseline64Features(cpuInfo); +#endif + + // Windows for ARM requires NEON. + cpuInfo->addFeature(CpuInfo::kArmFeatureNEON); + + // Detect additional CPU features by calling `IsProcessorFeaturePresent()`. + struct WinPFPMapping { + uint32_t pfpId, featureId; + }; + + static const WinPFPMapping mapping[] = { + { PF_ARM_FMAC_INSTRUCTIONS_AVAILABLE , CpuInfo::kArmFeatureVFP4 }, + { PF_ARM_VFP_32_REGISTERS_AVAILABLE , CpuInfo::kArmFeatureVFP_D32 }, + { PF_ARM_DIVIDE_INSTRUCTION_AVAILABLE, CpuInfo::kArmFeatureIDIV }, + { PF_ARM_64BIT_LOADSTORE_ATOMIC , CpuInfo::kArmFeatureAtomics64 } + }; + + for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(mapping); i++) + if (::IsProcessorFeaturePresent(mapping[i].pfpId)) + cpuInfo->addFeature(mapping[i].featureId); +} +#endif // ASMJIT_OS_WINDOWS + +#if ASMJIT_OS_LINUX +struct LinuxHWCapMapping { + uint32_t hwcapMask, featureId; +}; + +static void armDetectHWCaps(CpuInfo* cpuInfo, + unsigned long type, const LinuxHWCapMapping* mapping, size_t length) noexcept { + + unsigned long mask = getauxval(type); + for (size_t i = 0; i < length; i++) + if ((mask & mapping[i].hwcapMask) == mapping[i].hwcapMask) + cpuInfo->addFeature(mapping[i].featureId); +} + +//! \internal +//! +//! Detect ARM CPU features on Linux. +//! +//! The detection is based on `getauxval()`. +static void armDetectCpuInfoOnLinux(CpuInfo* cpuInfo) noexcept { +#if ASMJIT_ARCH_ARM32 + cpuInfo->setArch(kArchArm32); + + // `AT_HWCAP` provides ARMv7 (and less) related flags. 
+ static const LinuxHWCapMapping hwCapMapping[] = { + { /* HWCAP_VFPv3 */ (1 << 13), CpuInfo::kArmFeatureVFP3 }, + { /* HWCAP_VFPv4 */ (1 << 16), CpuInfo::kArmFeatureVFP4 }, + { /* HWCAP_IDIVA */ (3 << 17), CpuInfo::kArmFeatureIDIV }, + { /* HWCAP_VFPD32 */ (1 << 19), CpuInfo::kArmFeatureVFP_D32 }, + { /* HWCAP_NEON */ (1 << 12), CpuInfo::kArmFeatureNEON }, + { /* HWCAP_EDSP */ (1 << 7), CpuInfo::kArmFeatureDSP } + }; + armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping)); + + // VFP3 implies VFP2. + if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3)) + cpuInfo->addFeature(CpuInfo::kArmFeatureVFP2); + + // VFP2 implies ARMv6. + if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP2)) + cpuInfo->addFeature(CpuInfo::kArmFeatureV6); + + // VFP3 or NEON implies ARMv7. + if (cpuInfo->hasFeature(CpuInfo::kArmFeatureVFP3) || + cpuInfo->hasFeature(CpuInfo::kArmFeatureNEON)) + cpuInfo->addFeature(CpuInfo::kArmFeatureV7); + + // `AT_HWCAP2` provides ARMv8 related flags. + static const LinuxHWCapMapping hwCap2Mapping[] = { + { /* HWCAP2_AES */ (1 << 0), CpuInfo::kArmFeatureAES }, + { /* HWCAP2_CRC32 */ (1 << 4), CpuInfo::kArmFeatureCRC32 }, + { /* HWCAP2_PMULL */ (1 << 1), CpuInfo::kArmFeaturePMULL }, + { /* HWCAP2_SHA1 */ (1 << 2), CpuInfo::kArmFeatureSHA1 }, + { /* HWCAP2_SHA2 */ (1 << 3), CpuInfo::kArmFeatureSHA256 } + }; + armDetectHWCaps(cpuInfo, AT_HWCAP2, hwCap2Mapping, ASMJIT_ARRAY_SIZE(hwCap2Mapping)); + + if (cpuInfo->hasFeature(CpuInfo::kArmFeatureAES ) || + cpuInfo->hasFeature(CpuInfo::kArmFeatureCRC32 ) || + cpuInfo->hasFeature(CpuInfo::kArmFeaturePMULL ) || + cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA1 ) || + cpuInfo->hasFeature(CpuInfo::kArmFeatureSHA256)) { + cpuInfo->addFeature(CpuInfo::kArmFeatureV8); + } +#else + cpuInfo->setArch(kArchArm64); + armPopulateBaseline64Features(cpuInfo); + + // `AT_HWCAP` provides ARMv8 related flags. 
+ static const LinuxHWCapMapping hwCapMapping[] = { + { /* HWCAP_ASIMD */ (1 << 1), CpuInfo::kArmFeatureNEON }, + { /* HWCAP_AES */ (1 << 3), CpuInfo::kArmFeatureAES }, + { /* HWCAP_CRC32 */ (1 << 7), CpuInfo::kArmFeatureCRC32 }, + { /* HWCAP_PMULL */ (1 << 4), CpuInfo::kArmFeaturePMULL }, + { /* HWCAP_SHA1 */ (1 << 5), CpuInfo::kArmFeatureSHA1 }, + { /* HWCAP_SHA2 */ (1 << 6), CpuInfo::kArmFeatureSHA256 }, + { /* HWCAP_ATOMICS */ (1 << 8), CpuInfo::kArmFeatureAtomics64 } + }; + armDetectHWCaps(cpuInfo, AT_HWCAP, hwCapMapping, ASMJIT_ARRAY_SIZE(hwCapMapping)); + + // `AT_HWCAP2` is not used at the moment. +#endif +} +#endif // ASMJIT_OS_LINUX + +static void armDetectCpuInfo(CpuInfo* cpuInfo) noexcept { +#if ASMJIT_OS_WINDOWS + armDetectCpuInfoOnWindows(cpuInfo); +#elif ASMJIT_OS_LINUX + armDetectCpuInfoOnLinux(cpuInfo); +#else +# error "[asmjit] armDetectCpuInfo() - Unsupported OS." +#endif +} +#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 + +// ============================================================================ +// [asmjit::CpuInfo - Detect X86 & X64] +// ============================================================================ + +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 + +//! \internal +//! +//! X86 CPUID result. +struct CpuIdResult { + uint32_t eax, ebx, ecx, edx; +}; + +//! \internal +//! +//! Content of XCR register, result of XGETBV instruction. +struct XGetBVResult { + uint32_t eax, edx; +}; + +#if ASMJIT_CC_MSC && !ASMJIT_CC_MSC_GE(15, 0, 30729) && ASMJIT_ARCH_X64 +//! \internal +//! +//! HACK: VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However, +//! 64-bit calling convention specifies the first parameter to be passed in +//! ECX, so we may be lucky if compiler doesn't move the register, otherwise +//! the result would be wrong. +static ASMJIT_NOINLINE void x86CallCpuIdWorkaround(uint32_t inEcx, uint32_t inEax, CpuIdResult* result) noexcept { + __cpuid(reinterpret_cast<int*>(result), inEax); +} +#endif + +//! \internal +//! +//! 
Wrapper to call `cpuid` instruction. +static void ASMJIT_INLINE x86CallCpuId(CpuIdResult* result, uint32_t inEax, uint32_t inEcx = 0) noexcept { +#if ASMJIT_CC_MSC && ASMJIT_CC_MSC_GE(15, 0, 30729) + __cpuidex(reinterpret_cast(result), inEax, inEcx); +#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X64 + x86CallCpuIdWorkaround(inEcx, inEax, result); +#elif ASMJIT_CC_MSC && ASMJIT_ARCH_X86 + uint32_t paramEax = inEax; + uint32_t paramEcx = inEcx; + uint32_t* out = reinterpret_cast(result); + + __asm { + mov eax, paramEax + mov ecx, paramEcx + mov edi, out + cpuid + mov dword ptr[edi + 0], eax + mov dword ptr[edi + 4], ebx + mov dword ptr[edi + 8], ecx + mov dword ptr[edi + 12], edx + } +#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X86 + __asm__ __volatile__( + "mov %%ebx, %%edi\n" + "cpuid\n" + "xchg %%edi, %%ebx\n" + : "=a"(result->eax), + "=D"(result->ebx), + "=c"(result->ecx), + "=d"(result->edx) + : "a"(inEax), + "c"(inEcx) + ); +#elif (ASMJIT_CC_GCC || ASMJIT_CC_CLANG) && ASMJIT_ARCH_X64 + __asm__ __volatile__( \ + "mov %%rbx, %%rdi\n" + "cpuid\n" + "xchg %%rdi, %%rbx\n" + : "=a"(result->eax), + "=D"(result->ebx), + "=c"(result->ecx), + "=d"(result->edx) + : "a"(inEax), + "c"(inEcx) + ); +#else +# error "[asmjit] x86CallCpuid() - Unsupported compiler." +#endif +} + +//! \internal +//! +//! Wrapper to call `xgetbv` instruction. 
+static void x86CallXGetBV(XGetBVResult* result, uint32_t inEcx) noexcept { +#if ASMJIT_CC_MSC_GE(16, 0, 40219) // 2010SP1+ + uint64_t value = _xgetbv(inEcx); + result->eax = static_cast(value & 0xFFFFFFFFU); + result->edx = static_cast(value >> 32); +#elif ASMJIT_CC_GCC || ASMJIT_CC_CLANG + uint32_t outEax; + uint32_t outEdx; + + // Replaced, because the world is not perfect: + // __asm__ __volatile__("xgetbv" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx)); + __asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(outEax), "=d"(outEdx) : "c"(inEcx)); + + result->eax = outEax; + result->edx = outEdx; +#else + result->eax = 0; + result->edx = 0; +#endif +} + +//! \internal +//! +//! Map a 12-byte vendor string returned by `cpuid` into a `CpuInfo::Vendor` ID. +static uint32_t x86GetCpuVendorID(const char* vendorString) noexcept { + struct VendorData { + uint32_t id; + char text[12]; + }; + + static const VendorData vendorList[] = { + { CpuInfo::kVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } }, + { CpuInfo::kVendorAMD , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } }, + { CpuInfo::kVendorVIA , { 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 } }, + { CpuInfo::kVendorVIA , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } } + }; + + uint32_t dw0 = reinterpret_cast(vendorString)[0]; + uint32_t dw1 = reinterpret_cast(vendorString)[1]; + uint32_t dw2 = reinterpret_cast(vendorString)[2]; + + for (uint32_t i = 0; i < ASMJIT_ARRAY_SIZE(vendorList); i++) { + if (dw0 == reinterpret_cast(vendorList[i].text)[0] && + dw1 == reinterpret_cast(vendorList[i].text)[1] && + dw2 == reinterpret_cast(vendorList[i].text)[2]) + return vendorList[i].id; + } + + return CpuInfo::kVendorNone; +} + +static ASMJIT_INLINE void x86SimplifyBrandString(char* s) noexcept { + // Used to always clear the current character to ensure that the result + // doesn't contain garbage after the new zero terminator. 
+ char* d = s; + + char prev = 0; + char curr = s[0]; + s[0] = '\0'; + + for (;;) { + if (curr == 0) + break; + + if (curr == ' ') { + if (prev == '@' || s[1] == ' ' || s[1] == '@') + goto _Skip; + } + + d[0] = curr; + d++; + prev = curr; + +_Skip: + curr = *++s; + s[0] = '\0'; + } + + d[0] = '\0'; +} + +static void x86DetectCpuInfo(CpuInfo* cpuInfo) noexcept { + uint32_t i, maxId; + + CpuIdResult regs; + XGetBVResult xcr0 = { 0, 0 }; + + // Architecture is known at compile-time. + cpuInfo->setArch(ASMJIT_ARCH_X86 ? kArchX86 : kArchX64); + + // -------------------------------------------------------------------------- + // [CPUID EAX=0x0] + // -------------------------------------------------------------------------- + + // Get vendor string/id. + x86CallCpuId(®s, 0x0); + + maxId = regs.eax; + ::memcpy(cpuInfo->_vendorString + 0, ®s.ebx, 4); + ::memcpy(cpuInfo->_vendorString + 4, ®s.edx, 4); + ::memcpy(cpuInfo->_vendorString + 8, ®s.ecx, 4); + cpuInfo->_vendorId = x86GetCpuVendorID(cpuInfo->_vendorString); + + // -------------------------------------------------------------------------- + // [CPUID EAX=0x1] + // -------------------------------------------------------------------------- + + if (maxId >= 0x1) { + // Get feature flags in ECX/EDX and family/model in EAX. + x86CallCpuId(®s, 0x1); + + // Fill family and model fields. + cpuInfo->_family = (regs.eax >> 8) & 0x0F; + cpuInfo->_model = (regs.eax >> 4) & 0x0F; + cpuInfo->_stepping = (regs.eax ) & 0x0F; + + // Use extended family and model fields. 
+ if (cpuInfo->_family == 0x0F) { + cpuInfo->_family += ((regs.eax >> 20) & 0xFF); + cpuInfo->_model += ((regs.eax >> 16) & 0x0F) << 4; + } + + cpuInfo->_x86Data._processorType = ((regs.eax >> 12) & 0x03); + cpuInfo->_x86Data._brandIndex = ((regs.ebx ) & 0xFF); + cpuInfo->_x86Data._flushCacheLineSize = ((regs.ebx >> 8) & 0xFF) * 8; + cpuInfo->_x86Data._maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF); + + if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE3); + if (regs.ecx & 0x00000002U) cpuInfo->addFeature(CpuInfo::kX86FeaturePCLMULQDQ); + if (regs.ecx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureMONITOR); + if (regs.ecx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSSE3); + if (regs.ecx & 0x00002000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG16B); + if (regs.ecx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_1); + if (regs.ecx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4_2); + if (regs.ecx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVBE); + if (regs.ecx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeaturePOPCNT); + if (regs.ecx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAESNI); + if (regs.ecx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE); + if (regs.ecx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureXSAVE_OS); + if (regs.ecx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDRAND); + + if (regs.edx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSC); + if (regs.edx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMPXCHG8B); + if (regs.edx & 0x00008000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCMOV); + if (regs.edx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH); + if (regs.edx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX); + if (regs.edx & 0x01000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR); + if (regs.edx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE) + 
.addFeature(CpuInfo::kX86FeatureMMX2); + if (regs.edx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE) + .addFeature(CpuInfo::kX86FeatureSSE2); + if (regs.edx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMT); + + // AMD sets Multithreading to ON if it has two or more cores. + if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == CpuInfo::kVendorAMD && (regs.edx & 0x10000000U)) { + cpuInfo->_hwThreadsCount = 2; + } + + // Get the content of XCR0 if supported by CPU and enabled by OS. + if ((regs.ecx & 0x0C000000U) == 0x0C000000U) { + x86CallXGetBV(&xcr0, 0); + } + + // Detect AVX+. + if (regs.ecx & 0x10000000U) { + // - XCR0[2:1] == 11b + // XMM & YMM states need to be enabled by OS. + if ((xcr0.eax & 0x00000006U) == 0x00000006U) { + cpuInfo->addFeature(CpuInfo::kX86FeatureAVX); + + if (regs.ecx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureXOP); + if (regs.ecx & 0x00001000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA3); + if (regs.ecx & 0x00010000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFMA4); + if (regs.ecx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureF16C); + } + } + } + + // -------------------------------------------------------------------------- + // [CPUID EAX=0x7 ECX=0x0] + // -------------------------------------------------------------------------- + + // Detect new features if the processor supports CPUID-07. 
+ bool maybeMPX = false; + if (maxId >= 0x7) { + x86CallCpuId(®s, 0x7); + + if (regs.ebx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureFSGSBASE); + if (regs.ebx & 0x00000008U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI); + if (regs.ebx & 0x00000010U) cpuInfo->addFeature(CpuInfo::kX86FeatureHLE); + if (regs.ebx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeatureBMI2); + if (regs.ebx & 0x00000200U) cpuInfo->addFeature(CpuInfo::kX86FeatureMOVSBSTOSB_OPT); + if (regs.ebx & 0x00000800U) cpuInfo->addFeature(CpuInfo::kX86FeatureRTM); + if (regs.ebx & 0x00004000U) maybeMPX = true; + if (regs.ebx & 0x00040000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDSEED); + if (regs.ebx & 0x00080000U) cpuInfo->addFeature(CpuInfo::kX86FeatureADX); + if (regs.ebx & 0x00800000U) cpuInfo->addFeature(CpuInfo::kX86FeatureCLFLUSH_OPT); + if (regs.ebx & 0x20000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureSHA); + + if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCHWT1); + + // Detect AVX2. + if (cpuInfo->hasFeature(CpuInfo::kX86FeatureAVX)) { + if (regs.ebx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX2); + } + + // Detect AVX-512+. + if (regs.ebx & 0x00010000U) { + // - XCR0[2:1] == 11b + // XMM/YMM states need to be enabled by OS. + // - XCR0[7:5] == 111b + // Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 need to be enabled by OS. 
+ if ((xcr0.eax & 0x000000E6U) == 0x000000E6U) { + cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512F); + + if (regs.ebx & 0x00020000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512DQ); + if (regs.ebx & 0x04000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512PF); + if (regs.ebx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512ER); + if (regs.ebx & 0x10000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512CD); + if (regs.ebx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512BW); + if (regs.ebx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureAVX512VL); + } + } + } + + // -------------------------------------------------------------------------- + // [CPUID EAX=0xD, ECX=0x0] + // -------------------------------------------------------------------------- + + if (maxId >= 0xD && maybeMPX) { + x86CallCpuId(&regs, 0xD); + + // Both CPUID result and XCR0 has to be enabled to have support for MPX. + if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U) { + cpuInfo->addFeature(CpuInfo::kX86FeatureMPX); + } + } + + // -------------------------------------------------------------------------- + // [CPUID EAX=0x80000000...maxId] + // -------------------------------------------------------------------------- + + // Several CPUID calls are required to get the whole brand string. It's easy + // to copy one DWORD at a time instead of performing a byte copy. 
+ uint32_t* brand = reinterpret_cast<uint32_t*>(cpuInfo->_brandString); + + i = maxId = 0x80000000U; + do { + x86CallCpuId(&regs, i); + switch (i) { + case 0x80000000U: + maxId = Utils::iMin(regs.eax, 0x80000004); + break; + + case 0x80000001U: + if (regs.ecx & 0x00000001U) cpuInfo->addFeature(CpuInfo::kX86FeatureLAHF_SAHF); + if (regs.ecx & 0x00000020U) cpuInfo->addFeature(CpuInfo::kX86FeatureLZCNT); + if (regs.ecx & 0x00000040U) cpuInfo->addFeature(CpuInfo::kX86FeatureSSE4A); + if (regs.ecx & 0x00000080U) cpuInfo->addFeature(CpuInfo::kX86FeatureMSSE); + if (regs.ecx & 0x00000100U) cpuInfo->addFeature(CpuInfo::kX86FeaturePREFETCH); + + if (regs.edx & 0x00100000U) cpuInfo->addFeature(CpuInfo::kX86FeatureNX); + if (regs.edx & 0x02000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureFXSR_OPT); + if (regs.edx & 0x00400000U) cpuInfo->addFeature(CpuInfo::kX86FeatureMMX2); + if (regs.edx & 0x08000000U) cpuInfo->addFeature(CpuInfo::kX86FeatureRDTSCP); + if (regs.edx & 0x40000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW2) + .addFeature(CpuInfo::kX86FeatureMMX2); + if (regs.edx & 0x80000000U) cpuInfo->addFeature(CpuInfo::kX86Feature3DNOW); + break; + + case 0x80000002U: + case 0x80000003U: + case 0x80000004U: + *brand++ = regs.eax; + *brand++ = regs.ebx; + *brand++ = regs.ecx; + *brand++ = regs.edx; + break; + + default: + // Stop the loop, additional features can be detected in the future. + i = maxId; + break; + } + } while (i++ < maxId); + + // Simplify CPU brand string by removing unnecessary spaces. 
+ x86SimplifyBrandString(cpuInfo->_brandString); +} +#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 + +// ============================================================================ +// [asmjit::CpuInfo - Detect - HWThreadsCount] +// ============================================================================ + +static uint32_t cpuDetectHWThreadsCount() noexcept { #if ASMJIT_OS_WINDOWS SYSTEM_INFO info; ::GetSystemInfo(&info); return info.dwNumberOfProcessors; #elif ASMJIT_OS_POSIX && defined(_SC_NPROCESSORS_ONLN) - // It seems that sysconf returns the number of "logical" processors on both - // mac and linux. So we get the number of "online logical" processors. long res = ::sysconf(_SC_NPROCESSORS_ONLN); - if (res == -1) return 1; - + if (res <= 0) return 1; return static_cast(res); #else return 1; #endif } +// ============================================================================ +// [asmjit::CpuInfo - Detect] +// ============================================================================ + +void CpuInfo::detect() noexcept { + reset(); + + // Detect the number of hardware threads available. + _hwThreadsCount = cpuDetectHWThreadsCount(); + +#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 + armDetectCpuInfo(this); +#endif // ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 + +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 + x86DetectCpuInfo(this); +#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 +} + // ============================================================================ // [asmjit::CpuInfo - GetHost] // ============================================================================ -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 -struct AutoX86CpuInfo : public X86CpuInfo { - ASMJIT_INLINE AutoX86CpuInfo() : X86CpuInfo() { - X86CpuUtil::detect(this); - } +struct HostCpuInfo : public CpuInfo { + ASMJIT_INLINE HostCpuInfo() noexcept : CpuInfo() { detect(); } }; -#else -#error "[asmjit] Unsupported CPU." 
-#endif -const CpuInfo* CpuInfo::getHost() { -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - static AutoX86CpuInfo cpuInfo; -#else -#error "[asmjit] Unsupported CPU." -#endif - return &cpuInfo; +const CpuInfo& CpuInfo::getHost() noexcept { + static HostCpuInfo host; + return host; } } // asmjit namespace diff --git a/src/asmjit/base/cpuinfo.h b/src/asmjit/base/cpuinfo.h index 0def203..9a77fcd 100644 --- a/src/asmjit/base/cpuinfo.h +++ b/src/asmjit/base/cpuinfo.h @@ -19,107 +19,357 @@ namespace asmjit { //! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::CpuVendor] -// ============================================================================ - -//! Cpu vendor ID. -//! -//! Vendor IDs are specific to AsmJit library. During the library initialization -//! AsmJit checks host CPU and tries to identify the vendor based on the CPUID -//! calls. Some manufacturers changed their vendor strings and AsmJit is aware -//! of that - it checks multiple combinations and decides which vendor ID should -//! be used. -ASMJIT_ENUM(CpuVendor) { - //! No/Unknown vendor. - kCpuVendorNone = 0, - - //! Intel vendor. - kCpuVendorIntel = 1, - //! AMD vendor. - kCpuVendorAMD = 2, - //! VIA vendor. - kCpuVendorVIA = 3 -}; - // ============================================================================ // [asmjit::CpuInfo] // ============================================================================ -//! Base cpu information. +//! CPU information. struct CpuInfo { - ASMJIT_NO_COPY(CpuInfo) + // -------------------------------------------------------------------------- + // [Vendor] + // -------------------------------------------------------------------------- + + //! CPU vendor ID. + ASMJIT_ENUM(Vendor) { + //! Generic or unknown. + kVendorNone = 0, + + //! Intel vendor. + kVendorIntel = 1, + //! AMD vendor. + kVendorAMD = 2, + //! VIA vendor. 
+ kVendorVIA = 3 + }; + + // -------------------------------------------------------------------------- + // [ArmFeatures] + // -------------------------------------------------------------------------- + + //! ARM/ARM64 CPU features. + ASMJIT_ENUM(ArmFeatures) { + //! ARMv6 instruction set. + kArmFeatureV6, + //! ARMv7 instruction set. + kArmFeatureV7, + //! ARMv8 instruction set. + kArmFeatureV8, + + //! CPU provides THUMB v1 instruction set (ARM only). + kArmFeatureTHUMB, + //! CPU provides THUMB v2 instruction set (ARM only). + kArmFeatureTHUMB2, + + //! CPU provides VFPv2 instruction set. + kArmFeatureVFP2, + //! CPU provides VFPv3 instruction set. + kArmFeatureVFP3, + //! CPU provides VFPv4 instruction set. + kArmFeatureVFP4, + //! CPU provides 32 VFP-D (64-bit) registers. + kArmFeatureVFP_D32, + + //! CPU provides NEON instruction set. + kArmFeatureNEON, + + //! CPU provides DSP extensions. + kArmFeatureDSP, + //! CPU provides hardware support for SDIV and UDIV. + kArmFeatureIDIV, + + //! CPU provides AES instructions (ARM64 only). + kArmFeatureAES, + //! CPU provides CRC32 instructions (ARM64 only). + kArmFeatureCRC32, + //! CPU provides PMULL instructions (ARM64 only). + kArmFeaturePMULL, + //! CPU provides SHA1 instructions (ARM64 only). + kArmFeatureSHA1, + //! CPU provides SHA256 instructions (ARM64 only). + kArmFeatureSHA256, + //! CPU provides 64-bit load/store atomics (ARM64 only). + kArmFeatureAtomics64, + + //! Count of ARM/ARM64 CPU features. + kArmFeaturesCount + }; + + // -------------------------------------------------------------------------- + // [X86Features] + // -------------------------------------------------------------------------- + + //! X86/X64 CPU features. + ASMJIT_ENUM(X86Features) { + //! Cpu has Not-Execute-Bit. + kX86FeatureNX = 0, + //! Cpu has multithreading. + kX86FeatureMT, + //! Cpu has RDTSC. + kX86FeatureRDTSC, + //! Cpu has RDTSCP. + kX86FeatureRDTSCP, + //! Cpu has CMOV. + kX86FeatureCMOV, + //! Cpu has CMPXCHG8B. 
+ kX86FeatureCMPXCHG8B, + //! Cpu has CMPXCHG16B (X64). + kX86FeatureCMPXCHG16B, + //! Cpu has CLFUSH. + kX86FeatureCLFLUSH, + //! Cpu has CLFUSH (Optimized). + kX86FeatureCLFLUSH_OPT, + //! Cpu has PREFETCH. + kX86FeaturePREFETCH, + //! Cpu has PREFETCHWT1. + kX86FeaturePREFETCHWT1, + //! Cpu has LAHF/SAHF. + kX86FeatureLAHF_SAHF, + //! Cpu has FXSAVE/FXRSTOR. + kX86FeatureFXSR, + //! Cpu has FXSAVE/FXRSTOR (Optimized). + kX86FeatureFXSR_OPT, + //! Cpu has MMX. + kX86FeatureMMX, + //! Cpu has extended MMX. + kX86FeatureMMX2, + //! Cpu has 3dNow! + kX86Feature3DNOW, + //! Cpu has enchanced 3dNow! + kX86Feature3DNOW2, + //! Cpu has SSE. + kX86FeatureSSE, + //! Cpu has SSE2. + kX86FeatureSSE2, + //! Cpu has SSE3. + kX86FeatureSSE3, + //! Cpu has SSSE3. + kX86FeatureSSSE3, + //! Cpu has SSE4.A. + kX86FeatureSSE4A, + //! Cpu has SSE4.1. + kX86FeatureSSE4_1, + //! Cpu has SSE4.2. + kX86FeatureSSE4_2, + //! Cpu has Misaligned SSE (MSSE). + kX86FeatureMSSE, + //! Cpu has MONITOR and MWAIT. + kX86FeatureMONITOR, + //! Cpu has MOVBE. + kX86FeatureMOVBE, + //! Cpu has POPCNT. + kX86FeaturePOPCNT, + //! Cpu has LZCNT. + kX86FeatureLZCNT, + //! Cpu has AESNI. + kX86FeatureAESNI, + //! Cpu has PCLMULQDQ. + kX86FeaturePCLMULQDQ, + //! Cpu has RDRAND. + kX86FeatureRDRAND, + //! Cpu has RDSEED. + kX86FeatureRDSEED, + //! Cpu has SHA-1 and SHA-256. + kX86FeatureSHA, + //! Cpu has XSAVE support - XSAVE/XRSTOR, XSETBV/XGETBV, and XCR0. + kX86FeatureXSAVE, + //! OS has enabled XSAVE, you can call XGETBV to get value of XCR0. + kX86FeatureXSAVE_OS, + //! Cpu has AVX. + kX86FeatureAVX, + //! Cpu has AVX2. + kX86FeatureAVX2, + //! Cpu has F16C. + kX86FeatureF16C, + //! Cpu has FMA3. + kX86FeatureFMA3, + //! Cpu has FMA4. + kX86FeatureFMA4, + //! Cpu has XOP. + kX86FeatureXOP, + //! Cpu has BMI. + kX86FeatureBMI, + //! Cpu has BMI2. + kX86FeatureBMI2, + //! Cpu has HLE. + kX86FeatureHLE, + //! Cpu has RTM. + kX86FeatureRTM, + //! Cpu has ADX. + kX86FeatureADX, + //! 
Cpu has MPX (Memory Protection Extensions).
+    kX86FeatureMPX,
+    //! Cpu has FSGSBASE.
+    kX86FeatureFSGSBASE,
+    //! Cpu has optimized REP MOVSB/STOSB.
+    kX86FeatureMOVSBSTOSB_OPT,
+
+    //! Cpu has AVX-512F (Foundation).
+    kX86FeatureAVX512F,
+    //! Cpu has AVX-512CD (Conflict Detection).
+    kX86FeatureAVX512CD,
+    //! Cpu has AVX-512PF (Prefetch Instructions).
+    kX86FeatureAVX512PF,
+    //! Cpu has AVX-512ER (Exponential and Reciprocal Instructions).
+    kX86FeatureAVX512ER,
+    //! Cpu has AVX-512DQ (DWord/QWord).
+    kX86FeatureAVX512DQ,
+    //! Cpu has AVX-512BW (Byte/Word).
+    kX86FeatureAVX512BW,
+    //! Cpu has AVX VL (Vector Length Extensions).
+    kX86FeatureAVX512VL,
+
+    //! Count of X86/X64 CPU features.
+    kX86FeaturesCount
+  };
+
+  // --------------------------------------------------------------------------
+  // [Other]
+  // --------------------------------------------------------------------------
+
   //! \internal
   enum {
     kFeaturesPerUInt32 = static_cast<uint32_t>(sizeof(uint32_t)) * 8
   };
 
+  // --------------------------------------------------------------------------
+  // [ArmInfo]
+  // --------------------------------------------------------------------------
+
+  struct ArmData {
+  };
+
+  // --------------------------------------------------------------------------
+  // [X86Info]
+  // --------------------------------------------------------------------------
+
+  struct X86Data {
+    //! Processor type.
+    uint32_t _processorType;
+    //! Brand index.
+    uint32_t _brandIndex;
+    //! Flush cache line size in bytes.
+    uint32_t _flushCacheLineSize;
+    //! Maximum number of addressable IDs for logical processors.
+ uint32_t _maxLogicalProcessors; + }; + // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE CpuInfo(uint32_t size) : _size(size) {} + ASMJIT_INLINE CpuInfo() noexcept { reset(); } + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + + ASMJIT_INLINE void reset() noexcept { + ::memset(this, 0, sizeof(CpuInfo)); + } + + // -------------------------------------------------------------------------- + // [Detect] + // -------------------------------------------------------------------------- + + ASMJIT_API void detect() noexcept; // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- + //! Get CPU architecture, see \Arch. + ASMJIT_INLINE uint32_t getArch() const noexcept { return _arch; } + //! Set CPU architecture, see \Arch. + ASMJIT_INLINE void setArch(uint32_t arch) noexcept { _arch = static_cast(arch); } + //! Get CPU vendor string. - ASMJIT_INLINE const char* getVendorString() const { return _vendorString; } + ASMJIT_INLINE const char* getVendorString() const noexcept { return _vendorString; } //! Get CPU brand string. - ASMJIT_INLINE const char* getBrandString() const { return _brandString; } + ASMJIT_INLINE const char* getBrandString() const noexcept { return _brandString; } //! Get CPU vendor ID. - ASMJIT_INLINE uint32_t getVendorId() const { return _vendorId; } + ASMJIT_INLINE uint32_t getVendorId() const noexcept { return _vendorId; } //! Get CPU family ID. - ASMJIT_INLINE uint32_t getFamily() const { return _family; } + ASMJIT_INLINE uint32_t getFamily() const noexcept { return _family; } //! Get CPU model ID. 
- ASMJIT_INLINE uint32_t getModel() const { return _model; } + ASMJIT_INLINE uint32_t getModel() const noexcept { return _model; } //! Get CPU stepping. - ASMJIT_INLINE uint32_t getStepping() const { return _stepping; } + ASMJIT_INLINE uint32_t getStepping() const noexcept { return _stepping; } //! Get number of hardware threads available. - ASMJIT_INLINE uint32_t getHwThreadsCount() const { return _hwThreadsCount; } + ASMJIT_INLINE uint32_t getHwThreadsCount() const noexcept { + return _hwThreadsCount; + } //! Get whether CPU has a `feature`. - ASMJIT_INLINE bool hasFeature(uint32_t feature) const { + ASMJIT_INLINE bool hasFeature(uint32_t feature) const noexcept { ASMJIT_ASSERT(feature < sizeof(_features) * 8); - return static_cast( - (_features[feature / kFeaturesPerUInt32] >> (feature % kFeaturesPerUInt32)) & 0x1); + uint32_t pos = feature / kFeaturesPerUInt32; + uint32_t bit = feature % kFeaturesPerUInt32; + + return static_cast((_features[pos] >> bit) & 0x1); } //! Add a CPU `feature`. - ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) { + ASMJIT_INLINE CpuInfo& addFeature(uint32_t feature) noexcept { ASMJIT_ASSERT(feature < sizeof(_features) * 8); - _features[feature / kFeaturesPerUInt32] |= (1U << (feature % kFeaturesPerUInt32)); + uint32_t pos = feature / kFeaturesPerUInt32; + uint32_t bit = feature % kFeaturesPerUInt32; + + _features[pos] |= static_cast(1) << bit; return *this; } + // -------------------------------------------------------------------------- + // [Accessors - ARM] + // -------------------------------------------------------------------------- + + // -------------------------------------------------------------------------- + // [Accessors - X86] + // -------------------------------------------------------------------------- + + //! Get processor type. + ASMJIT_INLINE uint32_t getX86ProcessorType() const noexcept { + return _x86Data._processorType; + } + + //! Get brand index. 
+ ASMJIT_INLINE uint32_t getX86BrandIndex() const noexcept { + return _x86Data._brandIndex; + } + + //! Get flush cache line size. + ASMJIT_INLINE uint32_t getX86FlushCacheLineSize() const noexcept { + return _x86Data._flushCacheLineSize; + } + + //! Get maximum logical processors count. + ASMJIT_INLINE uint32_t getX86MaxLogicalProcessors() const noexcept { + return _x86Data._maxLogicalProcessors; + } + // -------------------------------------------------------------------------- // [Statics] // -------------------------------------------------------------------------- - //! Detect the number of hardware threads. - static ASMJIT_API uint32_t detectHwThreadsCount(); - //! Get host cpu. - static ASMJIT_API const CpuInfo* getHost(); + static ASMJIT_API const CpuInfo& getHost() noexcept; // -------------------------------------------------------------------------- // [Members] // -------------------------------------------------------------------------- - //! Size of the structure in bytes. - uint32_t _size; - - //! Cpu short vendor string. + //! Cpu vendor string. char _vendorString[16]; - //! Cpu long vendor string (brand). + //! Cpu brand string. char _brandString[64]; + //! CPU architecture, see \ref Arch. + uint8_t _arch; + //! \internal + uint8_t _reserved[3]; //! Cpu vendor id, see \ref CpuVendor. uint32_t _vendorId; //! Cpu family ID. @@ -134,6 +384,12 @@ struct CpuInfo { //! Cpu features bitfield. uint32_t _features[4]; + + // Architecture specific data. + union { + ArmData _armData; + X86Data _x86Data; + }; }; //! 
\} diff --git a/src/asmjit/base/globals.cpp b/src/asmjit/base/globals.cpp index b442901..3f51dca 100644 --- a/src/asmjit/base/globals.cpp +++ b/src/asmjit/base/globals.cpp @@ -19,7 +19,7 @@ namespace asmjit { // [asmjit::DebugUtils] // ============================================================================ -#if !defined(ASMJIT_DISABLE_NAMES) +#if !defined(ASMJIT_DISABLE_TEXT) static const char errorMessages[] = { "Ok\0" "No heap memory\0" @@ -39,7 +39,7 @@ static const char errorMessages[] = { "Unknown error\0" }; -static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) { +static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) noexcept { uint32_t i = 0; if (id > maxId) @@ -55,10 +55,10 @@ static const char* findPackedString(const char* p, uint32_t id, uint32_t maxId) return p; } -#endif // ASMJIT_DISABLE_NAMES +#endif // ASMJIT_DISABLE_TEXT -const char* DebugUtils::errorAsString(Error e) { -#if !defined(ASMJIT_DISABLE_NAMES) +const char* DebugUtils::errorAsString(Error e) noexcept { +#if !defined(ASMJIT_DISABLE_TEXT) return findPackedString(errorMessages, e, kErrorCount); #else static const char noMessage[] = ""; @@ -66,7 +66,7 @@ const char* DebugUtils::errorAsString(Error e) { #endif } -void DebugUtils::debugOutput(const char* str) { +void DebugUtils::debugOutput(const char* str) noexcept { #if ASMJIT_OS_WINDOWS ::OutputDebugStringA(str); #else @@ -74,7 +74,7 @@ void DebugUtils::debugOutput(const char* str) { #endif } -void DebugUtils::assertionFailed(const char* file, int line, const char* msg) { +void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept { char str[1024]; snprintf(str, 1024, diff --git a/src/asmjit/base/globals.h b/src/asmjit/base/globals.h index 4762e47..61f8e43 100644 --- a/src/asmjit/base/globals.h +++ b/src/asmjit/base/globals.h @@ -89,27 +89,25 @@ ASMJIT_ENUM(ArchId) { kArchX86 = 1, //! X64 architecture (64-bit), also called AMD64. kArchX64 = 2, - //! 
X32 architecture (64-bit with 32-bit pointers) (NOT USED ATM). kArchX32 = 3, //! Arm architecture (32-bit). - kArchArm = 4, + kArchArm32 = 4, //! Arm64 architecture (64-bit). kArchArm64 = 5, #if ASMJIT_ARCH_X86 - kArchHost = kArchX86, + kArchHost = kArchX86 #elif ASMJIT_ARCH_X64 - kArchHost = kArchX64, -#elif ASMJIT_ARCH_ARM - kArchHost = kArchArm, + kArchHost = kArchX64 +#elif ASMJIT_ARCH_ARM32 + kArchHost = kArchArm32 #elif ASMJIT_ARCH_ARM64 - kArchHost = kArchArm64, + kArchHost = kArchArm64 +#else +# error "[asmjit] Unsupported host architecture." #endif - - //! Whether the host is 64-bit. - kArchHost64Bit = sizeof(intptr_t) >= 8 }; // ============================================================================ @@ -184,8 +182,8 @@ ASMJIT_ENUM(ErrorCode) { //! Illegal (unencodable) displacement used. //! - //! X86/X64 - //! ------- + //! X86/X64 Specific + //! ---------------- //! //! Short form of jump instruction has been used, but the displacement is out //! of bounds. @@ -219,14 +217,14 @@ static const _NoInit NoInit = {}; namespace DebugUtils { //! Get a printable version of AsmJit `Error` code. -ASMJIT_API const char* errorAsString(Error code); +ASMJIT_API const char* errorAsString(Error code) noexcept; //! \addtogroup asmjit_base //! \{ //! Called in debug build to output a debugging message caused by assertion //! failure or tracing. -ASMJIT_API void debugOutput(const char* str); +ASMJIT_API void debugOutput(const char* str) noexcept; //! Called in debug build on assertion failure. //! @@ -237,7 +235,7 @@ ASMJIT_API void debugOutput(const char* str); //! If you have problems with assertions put a breakpoint at assertionFailed() //! function (asmjit/base/globals.cpp) and check the call stack to locate the //! failing code. -ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg); +ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept; //! 
\} @@ -297,7 +295,7 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons //! cross-platform software with various compiler support, consider using //! `asmjit_cast<>` instead of `reinterpret_cast<>`. template -static ASMJIT_INLINE T asmjit_cast(Z* p) { return (T)p; } +static ASMJIT_INLINE T asmjit_cast(Z* p) noexcept { return (T)p; } //! \} diff --git a/src/asmjit/base/hlstream.h b/src/asmjit/base/hlstream.h index e715a55..58a9a63 100644 --- a/src/asmjit/base/hlstream.h +++ b/src/asmjit/base/hlstream.h @@ -40,95 +40,95 @@ struct HLSentinel; //! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::HLNodeType] -// ============================================================================ - -//! Type of \ref HLNode. -ASMJIT_ENUM(HLNodeType) { - //! Invalid node (internal, don't use). - kHLNodeTypeNone = 0, - - // -------------------------------------------------------------------------- - // [Low-Level - Assembler / Compiler] - // -------------------------------------------------------------------------- - - //! Node is \ref HLInst or \ref HLJump. - kHLNodeTypeInst, - //! Node is \ref HLData. - kHLNodeTypeData, - //! Node is \ref HLAlign. - kHLNodeTypeAlign, - //! Node is \ref HLLabel. - kHLNodeTypeLabel, - //! Node is \ref HLComment. - kHLNodeTypeComment, - //! Node is \ref HLSentinel. - kHLNodeTypeSentinel, - - // -------------------------------------------------------------------------- - // [High-Level - Compiler-Only] - // -------------------------------------------------------------------------- - - //! Node is \ref HLHint. - kHLNodeTypeHint, - //! Node is \ref HLFunc. - kHLNodeTypeFunc, - //! Node is \ref HLRet. - kHLNodeTypeRet, - //! Node is \ref HLCall. - kHLNodeTypeCall, - //! Node is \ref HLCallArg. 
- kHLNodeTypeCallArg -}; - -// ============================================================================ -// [asmjit::HLNodeFlags] -// ============================================================================ - -ASMJIT_ENUM(HLNodeFlags) { - //! Whether the node has been translated, thus contains only registers. - kHLNodeFlagIsTranslated = 0x0001, - - //! Whether the node was scheduled - possibly reordered, but basically this - //! is a mark that is set by scheduler after the node has been visited. - kHLNodeFlagIsScheduled = 0x0002, - - //! Whether the node is informative only and can be safely removed. - kHLNodeFlagIsInformative = 0x0004, - - //! Whether the `HLInst` is a jump. - kHLNodeFlagIsJmp = 0x0008, - //! Whether the `HLInst` is a conditional jump. - kHLNodeFlagIsJcc = 0x0010, - - //! Whether the `HLInst` is an unconditinal jump or conditional jump that is - //! likely to be taken. - kHLNodeFlagIsTaken = 0x0020, - - //! Whether the `HLNode` will return from a function. - //! - //! This flag is used by both `HLSentinel` and `HLRet`. - kHLNodeFlagIsRet = 0x0040, - - //! Whether the instruction is special. - kHLNodeFlagIsSpecial = 0x0080, - - //! Whether the instruction is an FPU instruction. - kHLNodeFlagIsFp = 0x0100 -}; - // ============================================================================ // [asmjit::HLNode] // ============================================================================ -//! Assembler stream (AS) node. +//! Base node (HL). //! -//! Every node represents an abstract instruction, directive, label, or -//! macro-instruction generated by the `Compiler` or other code generator. +//! Every node represents an abstract instruction, directive, label, or macro +//! instruction that can be serialized to `Assembler`. struct HLNode { ASMJIT_NO_COPY(HLNode) + // -------------------------------------------------------------------------- + // [Type] + // -------------------------------------------------------------------------- + + //! 
Type of \ref HLNode. + ASMJIT_ENUM(Type) { + //! Invalid node (internal, don't use). + kTypeNone = 0, + + // -------------------------------------------------------------------------- + // [Low-Level - Assembler / Compiler] + // -------------------------------------------------------------------------- + + //! Node is \ref HLInst or \ref HLJump. + kTypeInst, + //! Node is \ref HLData. + kTypeData, + //! Node is \ref HLAlign. + kTypeAlign, + //! Node is \ref HLLabel. + kTypeLabel, + //! Node is \ref HLComment. + kTypeComment, + //! Node is \ref HLSentinel. + kTypeSentinel, + + // -------------------------------------------------------------------------- + // [High-Level - Compiler-Only] + // -------------------------------------------------------------------------- + + //! Node is \ref HLHint. + kTypeHint, + //! Node is \ref HLFunc. + kTypeFunc, + //! Node is \ref HLRet. + kTypeRet, + //! Node is \ref HLCall. + kTypeCall, + //! Node is \ref HLCallArg. + kTypeCallArg + }; + + // -------------------------------------------------------------------------- + // [Flags] + // -------------------------------------------------------------------------- + + ASMJIT_ENUM(Flags) { + //! Whether the node has been translated, thus contains only registers. + kFlagIsTranslated = 0x0001, + + //! Whether the node was scheduled - possibly reordered, but basically this + //! is a mark that is set by scheduler after the node has been visited. + kFlagIsScheduled = 0x0002, + + //! Whether the node is informative only and can be safely removed. + kFlagIsInformative = 0x0004, + + //! Whether the `HLInst` is a jump. + kFlagIsJmp = 0x0008, + //! Whether the `HLInst` is a conditional jump. + kFlagIsJcc = 0x0010, + + //! Whether the `HLInst` is an unconditinal jump or conditional jump that is + //! likely to be taken. + kFlagIsTaken = 0x0020, + + //! Whether the `HLNode` will return from a function. + //! + //! This flag is used by both `HLSentinel` and `HLRet`. + kFlagIsRet = 0x0040, + + //! 
Whether the instruction is special. + kFlagIsSpecial = 0x0080, + + //! Whether the instruction is an FPU instruction. + kFlagIsFp = 0x0100 + }; + // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- @@ -136,129 +136,129 @@ struct HLNode { //! Create a new `HLNode`. //! //! \note Always use compiler to create nodes. - ASMJIT_INLINE HLNode(Compiler* compiler, uint32_t type); // Defined-Later. + ASMJIT_INLINE HLNode(Compiler* compiler, uint32_t type) noexcept; // Defined-Later. //! Destroy the `HLNode`. //! //! NOTE: Nodes are zone allocated, there should be no code in the destructor. - ASMJIT_INLINE ~HLNode() {} + ASMJIT_INLINE ~HLNode() noexcept {} // -------------------------------------------------------------------------- // [Accessors - List] // -------------------------------------------------------------------------- //! Get previous node in the compiler stream. - ASMJIT_INLINE HLNode* getPrev() const { return _prev; } + ASMJIT_INLINE HLNode* getPrev() const noexcept { return _prev; } //! Get next node in the compiler stream. - ASMJIT_INLINE HLNode* getNext() const { return _next; } + ASMJIT_INLINE HLNode* getNext() const noexcept { return _next; } // -------------------------------------------------------------------------- // [Accessors - Comment] // -------------------------------------------------------------------------- //! Get an inline comment string. - ASMJIT_INLINE const char* getComment() const { return _comment; } + ASMJIT_INLINE const char* getComment() const noexcept { return _comment; } //! Set an inline comment string to `comment`. 
- ASMJIT_INLINE void setComment(const char* comment) { _comment = comment; } + ASMJIT_INLINE void setComment(const char* comment) noexcept { _comment = comment; } // -------------------------------------------------------------------------- // [Accessors - Type and Flags] // -------------------------------------------------------------------------- - //! Get the node type, see \ref HLNodeType. - ASMJIT_INLINE uint32_t getType() const { return _type; } + //! Get the node type, see \ref Type. + ASMJIT_INLINE uint32_t getType() const noexcept { return _type; } //! Get the node flags. - ASMJIT_INLINE uint32_t getFlags() const { return _flags; } + ASMJIT_INLINE uint32_t getFlags() const noexcept { return _flags; } //! Get whether the instruction has flag `flag`. - ASMJIT_INLINE bool hasFlag(uint32_t flag) const { return (static_cast(_flags) & flag) != 0; } + ASMJIT_INLINE bool hasFlag(uint32_t flag) const noexcept { return (static_cast(_flags) & flag) != 0; } //! Set node flags to `flags`. - ASMJIT_INLINE void setFlags(uint32_t flags) { _flags = static_cast(flags); } + ASMJIT_INLINE void setFlags(uint32_t flags) noexcept { _flags = static_cast(flags); } //! Add instruction `flags`. - ASMJIT_INLINE void orFlags(uint32_t flags) { _flags |= static_cast(flags); } + ASMJIT_INLINE void orFlags(uint32_t flags) noexcept { _flags |= static_cast(flags); } //! And instruction `flags`. - ASMJIT_INLINE void andFlags(uint32_t flags) { _flags &= static_cast(flags); } + ASMJIT_INLINE void andFlags(uint32_t flags) noexcept { _flags &= static_cast(flags); } //! Clear instruction `flags`. - ASMJIT_INLINE void andNotFlags(uint32_t flags) { _flags &= ~static_cast(flags); } + ASMJIT_INLINE void andNotFlags(uint32_t flags) noexcept { _flags &= ~static_cast(flags); } //! Get whether the node has beed fetched. - ASMJIT_INLINE bool isFetched() const { return _flowId != 0; } + ASMJIT_INLINE bool isFetched() const noexcept { return _flowId != 0; } //! Get whether the node has been translated. 
- ASMJIT_INLINE bool isTranslated() const { return hasFlag(kHLNodeFlagIsTranslated); } + ASMJIT_INLINE bool isTranslated() const noexcept { return hasFlag(kFlagIsTranslated); } //! Get whether the node has been translated. - ASMJIT_INLINE bool isScheduled() const { return hasFlag(kHLNodeFlagIsScheduled); } + ASMJIT_INLINE bool isScheduled() const noexcept { return hasFlag(kFlagIsScheduled); } //! Get whether the node is informative only (comment, hint). - ASMJIT_INLINE bool isInformative() const { return hasFlag(kHLNodeFlagIsInformative); } + ASMJIT_INLINE bool isInformative() const noexcept { return hasFlag(kFlagIsInformative); } //! Whether the `HLInst` node is an unconditional jump. - ASMJIT_INLINE bool isJmp() const { return hasFlag(kHLNodeFlagIsJmp); } + ASMJIT_INLINE bool isJmp() const noexcept { return hasFlag(kFlagIsJmp); } //! Whether the `HLInst` node is a conditional jump. - ASMJIT_INLINE bool isJcc() const { return hasFlag(kHLNodeFlagIsJcc); } + ASMJIT_INLINE bool isJcc() const noexcept { return hasFlag(kFlagIsJcc); } //! Whether the `HLInst` node is a conditional/unconditional jump. - ASMJIT_INLINE bool isJmpOrJcc() const { return hasFlag(kHLNodeFlagIsJmp | kHLNodeFlagIsJcc); } + ASMJIT_INLINE bool isJmpOrJcc() const noexcept { return hasFlag(kFlagIsJmp | kFlagIsJcc); } //! Whether the `HLInst` node is a return. - ASMJIT_INLINE bool isRet() const { return hasFlag(kHLNodeFlagIsRet); } + ASMJIT_INLINE bool isRet() const noexcept { return hasFlag(kFlagIsRet); } //! Get whether the node is `HLInst` and the instruction is special. - ASMJIT_INLINE bool isSpecial() const { return hasFlag(kHLNodeFlagIsSpecial); } + ASMJIT_INLINE bool isSpecial() const noexcept { return hasFlag(kFlagIsSpecial); } //! Get whether the node is `HLInst` and the instruction uses x87-FPU. 
- ASMJIT_INLINE bool isFp() const { return hasFlag(kHLNodeFlagIsFp); } + ASMJIT_INLINE bool isFp() const noexcept { return hasFlag(kFlagIsFp); } // -------------------------------------------------------------------------- // [Accessors - FlowId] // -------------------------------------------------------------------------- //! Get flow index. - ASMJIT_INLINE uint32_t getFlowId() const { return _flowId; } + ASMJIT_INLINE uint32_t getFlowId() const noexcept { return _flowId; } //! Set flow index. - ASMJIT_INLINE void setFlowId(uint32_t flowId) { _flowId = flowId; } + ASMJIT_INLINE void setFlowId(uint32_t flowId) noexcept { _flowId = flowId; } // -------------------------------------------------------------------------- // [Accessors - TokenId] // -------------------------------------------------------------------------- - ASMJIT_INLINE bool hasTokenId(uint32_t id) const { return _tokenId == id; } - ASMJIT_INLINE uint32_t getTokenId() const { return _tokenId; } - ASMJIT_INLINE void setTokenId(uint32_t id) { _tokenId = id; } + ASMJIT_INLINE bool hasTokenId(uint32_t id) const noexcept { return _tokenId == id; } + ASMJIT_INLINE uint32_t getTokenId() const noexcept { return _tokenId; } + ASMJIT_INLINE void setTokenId(uint32_t id) noexcept { _tokenId = id; } // -------------------------------------------------------------------------- // [Accessors - VarMap] // -------------------------------------------------------------------------- //! Get whether node contains variable allocation instructions. - ASMJIT_INLINE bool hasMap() const { return _map != nullptr; } + ASMJIT_INLINE bool hasMap() const noexcept { return _map != nullptr; } //! Get variable allocation instructions. - ASMJIT_INLINE VarMap* getMap() const { return _map; } + ASMJIT_INLINE VarMap* getMap() const noexcept { return _map; } //! Get variable allocation instructions casted to `T*`. 
template - ASMJIT_INLINE T* getMap() const { return static_cast(_map); } + ASMJIT_INLINE T* getMap() const noexcept { return static_cast(_map); } //! Set variable allocation instructions. - ASMJIT_INLINE void setMap(VarMap* map) { _map = map; } + ASMJIT_INLINE void setMap(VarMap* map) noexcept { _map = map; } // -------------------------------------------------------------------------- // [Accessors - VarState] // -------------------------------------------------------------------------- //! Get whether the node has an associated `VarState`. - ASMJIT_INLINE bool hasState() const { return _state != nullptr; } + ASMJIT_INLINE bool hasState() const noexcept { return _state != nullptr; } //! Get node state. - ASMJIT_INLINE VarState* getState() const { return _state; } + ASMJIT_INLINE VarState* getState() const noexcept { return _state; } //! Get node state casted to `T*`. template - ASMJIT_INLINE T* getState() const { return static_cast(_state); } + ASMJIT_INLINE T* getState() const noexcept { return static_cast(_state); } //! Set node state. - ASMJIT_INLINE void setState(VarState* state) { _state = state; } + ASMJIT_INLINE void setState(VarState* state) noexcept { _state = state; } // -------------------------------------------------------------------------- // [Accessors - Liveness] // -------------------------------------------------------------------------- //! Get whether the node has variable liveness bits. - ASMJIT_INLINE bool hasLiveness() const { return _liveness != nullptr; } + ASMJIT_INLINE bool hasLiveness() const noexcept { return _liveness != nullptr; } //! Get variable liveness bits. - ASMJIT_INLINE BitArray* getLiveness() const { return _liveness; } + ASMJIT_INLINE BitArray* getLiveness() const noexcept { return _liveness; } //! Set variable liveness bits. 
- ASMJIT_INLINE void setLiveness(BitArray* liveness) { _liveness = liveness; } + ASMJIT_INLINE void setLiveness(BitArray* liveness) noexcept { _liveness = liveness; } // -------------------------------------------------------------------------- // [Members] @@ -269,11 +269,11 @@ struct HLNode { //! Next node. HLNode* _next; - //! Node type, see \ref HLNodeType. + //! Node type, see \ref Type. uint8_t _type; //! Count of operands (if the node has operands, otherwise zero). uint8_t _opCount; - //! Node flags, different meaning for every type of the node. + //! Flags, different meaning for every type of the node. uint16_t _flags; //! Flow index. @@ -312,7 +312,7 @@ struct HLNode { // [asmjit::HLInst] // ============================================================================ -//! Instruction node (HL). +//! Instruction (HL). //! //! Wraps an instruction with its options and operands. struct HLInst : public HLNode { @@ -323,8 +323,8 @@ struct HLInst : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLInst` instance. - ASMJIT_INLINE HLInst(Compiler* compiler, uint32_t instId, uint32_t instOptions, Operand* opList, uint32_t opCount) - : HLNode(compiler, kHLNodeTypeInst) { + ASMJIT_INLINE HLInst(Compiler* compiler, uint32_t instId, uint32_t instOptions, Operand* opList, uint32_t opCount) noexcept + : HLNode(compiler, kTypeInst) { _instId = static_cast(instId); _reserved = 0; @@ -337,71 +337,71 @@ struct HLInst : public HLNode { } //! Destroy the `HLInst` instance. - ASMJIT_INLINE ~HLInst() {} + ASMJIT_INLINE ~HLInst() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get the instruction id, see `X86InstId`. - ASMJIT_INLINE uint32_t getInstId() const { return _instId; } + ASMJIT_INLINE uint32_t getInstId() const noexcept { return _instId; } //! 
Set the instruction id to `instId`. //! //! NOTE: Please do not modify instruction code if you don't know what you //! are doing. Incorrect instruction code and/or operands can cause random //! errors in production builds and will most probably trigger assertion //! failures in debug builds. - ASMJIT_INLINE void setInstId(uint32_t instId) { _instId = static_cast(instId); } + ASMJIT_INLINE void setInstId(uint32_t instId) noexcept { _instId = static_cast(instId); } //! Whether the instruction is either a jump or a conditional jump likely to //! be taken. - ASMJIT_INLINE bool isTaken() const { return hasFlag(kHLNodeFlagIsTaken); } + ASMJIT_INLINE bool isTaken() const noexcept { return hasFlag(kFlagIsTaken); } //! Get emit options. - ASMJIT_INLINE uint32_t getOptions() const { return _instOptions; } + ASMJIT_INLINE uint32_t getOptions() const noexcept { return _instOptions; } //! Set emit options. - ASMJIT_INLINE void setOptions(uint32_t options) { _instOptions = options; } + ASMJIT_INLINE void setOptions(uint32_t options) noexcept { _instOptions = options; } //! Add emit options. - ASMJIT_INLINE void addOptions(uint32_t options) { _instOptions |= options; } + ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _instOptions |= options; } //! Mask emit options. - ASMJIT_INLINE void andOptions(uint32_t options) { _instOptions &= options; } + ASMJIT_INLINE void andOptions(uint32_t options) noexcept { _instOptions &= options; } //! Clear emit options. - ASMJIT_INLINE void delOptions(uint32_t options) { _instOptions &= ~options; } + ASMJIT_INLINE void delOptions(uint32_t options) noexcept { _instOptions &= ~options; } //! Get operands count. - ASMJIT_INLINE uint32_t getOpCount() const { return _opCount; } + ASMJIT_INLINE uint32_t getOpCount() const noexcept { return _opCount; } //! Get operands list. - ASMJIT_INLINE Operand* getOpList() { return _opList; } + ASMJIT_INLINE Operand* getOpList() noexcept { return _opList; } //! 
\overload - ASMJIT_INLINE const Operand* getOpList() const { return _opList; } + ASMJIT_INLINE const Operand* getOpList() const noexcept { return _opList; } //! Get whether the instruction contains a memory operand. - ASMJIT_INLINE bool hasMemOp() const { return _memOpIndex != 0xFF; } + ASMJIT_INLINE bool hasMemOp() const noexcept { return _memOpIndex != 0xFF; } //! Get memory operand. //! //! NOTE: Can only be called if the instruction has such operand, //! see `hasMemOp()`. - ASMJIT_INLINE BaseMem* getMemOp() const { + ASMJIT_INLINE BaseMem* getMemOp() const noexcept { ASMJIT_ASSERT(hasMemOp()); return static_cast(&_opList[_memOpIndex]); } //! \overload template - ASMJIT_INLINE T* getMemOp() const { + ASMJIT_INLINE T* getMemOp() const noexcept { ASMJIT_ASSERT(hasMemOp()); return static_cast(&_opList[_memOpIndex]); } //! Set memory operand index, `0xFF` means no memory operand. - ASMJIT_INLINE void setMemOpIndex(uint32_t index) { _memOpIndex = static_cast(index); } + ASMJIT_INLINE void setMemOpIndex(uint32_t index) noexcept { _memOpIndex = static_cast(index); } //! Reset memory operand index to `0xFF` (no operand). - ASMJIT_INLINE void resetMemOpIndex() { _memOpIndex = 0xFF; } + ASMJIT_INLINE void resetMemOpIndex() noexcept { _memOpIndex = 0xFF; } // -------------------------------------------------------------------------- // [Utils] // -------------------------------------------------------------------------- - ASMJIT_INLINE void _updateMemOp() { + ASMJIT_INLINE void _updateMemOp() noexcept { Operand* opList = getOpList(); uint32_t opCount = getOpCount(); @@ -436,7 +436,7 @@ _Update: // [asmjit::HLJump] // ============================================================================ -//! Jump node (HL). +//! Conditional or direct jump (HL). //! //! Extension of `HLInst` node, which stores more information about the jump. 
struct HLJump : public HLInst { @@ -446,16 +446,16 @@ struct HLJump : public HLInst { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE HLJump(Compiler* compiler, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) : + ASMJIT_INLINE HLJump(Compiler* compiler, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) noexcept : HLInst(compiler, code, options, opList, opCount) {} - ASMJIT_INLINE ~HLJump() {} + ASMJIT_INLINE ~HLJump() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- - ASMJIT_INLINE HLLabel* getTarget() const { return _target; } - ASMJIT_INLINE HLJump* getJumpNext() const { return _jumpNext; } + ASMJIT_INLINE HLLabel* getTarget() const noexcept { return _target; } + ASMJIT_INLINE HLJump* getJumpNext() const noexcept { return _jumpNext; } // -------------------------------------------------------------------------- // [Members] @@ -471,7 +471,7 @@ struct HLJump : public HLInst { // [asmjit::HLData] // ============================================================================ -//! Data node (HL). +//! Data (HL). //! //! Wraps `.data` directive. The node contains data that will be placed at the //! node's position in the assembler stream. The data is considered to be RAW; @@ -486,8 +486,8 @@ struct HLData : public HLNode { enum { kInlineBufferSize = 12 }; //! Create a new `HLData` instance. - ASMJIT_INLINE HLData(Compiler* compiler, void* data, uint32_t size) - : HLNode(compiler, kHLNodeTypeData) { + ASMJIT_INLINE HLData(Compiler* compiler, void* data, uint32_t size) noexcept + : HLNode(compiler, kTypeData) { _size = size; if (size <= kInlineBufferSize) { @@ -500,16 +500,16 @@ struct HLData : public HLNode { } //! Destroy the `HLData` instance. 
- ASMJIT_INLINE ~HLData() {} + ASMJIT_INLINE ~HLData() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get size of the data. - uint32_t getSize() const { return _size; } + uint32_t getSize() const noexcept { return _size; } //! Get pointer to the data. - uint8_t* getData() const { return _size <= kInlineBufferSize ? const_cast(_data.buf) : _data.ptr; } + uint8_t* getData() const noexcept { return _size <= kInlineBufferSize ? const_cast(_data.buf) : _data.ptr; } // -------------------------------------------------------------------------- // [Members] @@ -530,7 +530,7 @@ struct HLData : public HLNode { // [asmjit::HLAlign] // ============================================================================ -//! Align node (HL). +//! Align directive (HL). //! //! Wraps `.align` directive. struct HLAlign : public HLNode { @@ -541,29 +541,29 @@ struct HLAlign : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLAlign` instance. - ASMJIT_INLINE HLAlign(Compiler* compiler, uint32_t alignMode, uint32_t offset) - : HLNode(compiler, kHLNodeTypeAlign) { + ASMJIT_INLINE HLAlign(Compiler* compiler, uint32_t alignMode, uint32_t offset) noexcept + : HLNode(compiler, kTypeAlign) { _alignMode = alignMode; _offset = offset; } //! Destroy the `HLAlign` instance. - ASMJIT_INLINE ~HLAlign() {} + ASMJIT_INLINE ~HLAlign() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get align mode. - ASMJIT_INLINE uint32_t getAlignMode() const { return _alignMode; } + ASMJIT_INLINE uint32_t getAlignMode() const noexcept { return _alignMode; } //! Set align mode. 
- ASMJIT_INLINE void setAlignMode(uint32_t alignMode) { _alignMode = alignMode; } + ASMJIT_INLINE void setAlignMode(uint32_t alignMode) noexcept { _alignMode = alignMode; } //! Get align offset in bytes. - ASMJIT_INLINE uint32_t getOffset() const { return _offset; } + ASMJIT_INLINE uint32_t getOffset() const noexcept { return _offset; } //! Set align offset in bytes to `offset`. - ASMJIT_INLINE void setOffset(uint32_t offset) { _offset = offset; } + ASMJIT_INLINE void setOffset(uint32_t offset) noexcept { _offset = offset; } // -------------------------------------------------------------------------- // [Members] @@ -579,7 +579,7 @@ struct HLAlign : public HLNode { // [asmjit::HLLabel] // ============================================================================ -//! label node (HL). +//! label (HL). struct HLLabel : public HLNode { ASMJIT_NO_COPY(HLLabel) @@ -588,8 +588,8 @@ struct HLLabel : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLLabel` instance. - ASMJIT_INLINE HLLabel(Compiler* compiler, uint32_t labelId) - : HLNode(compiler, kHLNodeTypeLabel) { + ASMJIT_INLINE HLLabel(Compiler* compiler, uint32_t labelId) noexcept + : HLNode(compiler, kTypeLabel) { _id = labelId; _numRefs = 0; @@ -597,36 +597,36 @@ struct HLLabel : public HLNode { } //! Destroy the `HLLabel` instance. - ASMJIT_INLINE ~HLLabel() {} + ASMJIT_INLINE ~HLLabel() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get target label. - ASMJIT_INLINE Label getLabel() const { return Label(_id); } + ASMJIT_INLINE Label getLabel() const noexcept { return Label(_id); } //! Get target label id. - ASMJIT_INLINE uint32_t getLabelId() const { return _id; } + ASMJIT_INLINE uint32_t getLabelId() const noexcept { return _id; } //! Get first jmp instruction. 
- ASMJIT_INLINE HLJump* getFrom() const { return _from; } + ASMJIT_INLINE HLJump* getFrom() const noexcept { return _from; } //! Get whether the node has assigned state. - ASMJIT_INLINE bool hasState() const { return _state != nullptr; } + ASMJIT_INLINE bool hasState() const noexcept { return _state != nullptr; } //! Get state for this target. - ASMJIT_INLINE VarState* getState() const { return _state; } + ASMJIT_INLINE VarState* getState() const noexcept { return _state; } //! Set state for this target. - ASMJIT_INLINE void setState(VarState* state) { _state = state; } + ASMJIT_INLINE void setState(VarState* state) noexcept { _state = state; } //! Get number of jumps to this target. - ASMJIT_INLINE uint32_t getNumRefs() const { return _numRefs; } + ASMJIT_INLINE uint32_t getNumRefs() const noexcept { return _numRefs; } //! Set number of jumps to this target. - ASMJIT_INLINE void setNumRefs(uint32_t i) { _numRefs = i; } + ASMJIT_INLINE void setNumRefs(uint32_t i) noexcept { _numRefs = i; } //! Add number of jumps to this target. - ASMJIT_INLINE void addNumRefs(uint32_t i = 1) { _numRefs += i; } + ASMJIT_INLINE void addNumRefs(uint32_t i = 1) noexcept { _numRefs += i; } //! Subtract number of jumps to this target. - ASMJIT_INLINE void subNumRefs(uint32_t i = 1) { _numRefs -= i; } + ASMJIT_INLINE void subNumRefs(uint32_t i = 1) noexcept { _numRefs -= i; } // -------------------------------------------------------------------------- // [Members] @@ -645,7 +645,7 @@ struct HLLabel : public HLNode { // [asmjit::HLComment] // ============================================================================ -//! Comment node (HL). +//! Comment (HL). struct HLComment : public HLNode { ASMJIT_NO_COPY(HLComment) @@ -654,22 +654,22 @@ struct HLComment : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLComment` instance. 
- ASMJIT_INLINE HLComment(Compiler* compiler, const char* comment) - : HLNode(compiler, kHLNodeTypeComment) { + ASMJIT_INLINE HLComment(Compiler* compiler, const char* comment) noexcept + : HLNode(compiler, kTypeComment) { - orFlags(kHLNodeFlagIsInformative); + orFlags(kFlagIsInformative); _comment = comment; } //! Destroy the `HLComment` instance. - ASMJIT_INLINE ~HLComment() {} + ASMJIT_INLINE ~HLComment() noexcept {} }; // ============================================================================ // [asmjit::HLSentinel] // ============================================================================ -//! Sentinel node (HL). +//! Sentinel (HL). struct HLSentinel : public HLNode { ASMJIT_NO_COPY(HLSentinel) @@ -678,13 +678,13 @@ struct HLSentinel : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLSentinel` instance. - ASMJIT_INLINE HLSentinel(Compiler* compiler) - : HLNode(compiler, kHLNodeTypeSentinel) { - _flags |= kHLNodeFlagIsRet; + ASMJIT_INLINE HLSentinel(Compiler* compiler) noexcept + : HLNode(compiler, kTypeSentinel) { + _flags |= kFlagIsRet; } //! Destroy the `HLSentinel` instance. - ASMJIT_INLINE ~HLSentinel() {} + ASMJIT_INLINE ~HLSentinel() noexcept {} }; // ============================================================================ @@ -700,34 +700,34 @@ struct HLHint : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLHint` instance. - ASMJIT_INLINE HLHint(Compiler* compiler, VarData* vd, uint32_t hint, uint32_t value) - : HLNode(compiler, kHLNodeTypeHint) { + ASMJIT_INLINE HLHint(Compiler* compiler, VarData* vd, uint32_t hint, uint32_t value) noexcept + : HLNode(compiler, kTypeHint) { - orFlags(kHLNodeFlagIsInformative); + orFlags(kFlagIsInformative); _vd = vd; _hint = hint; _value = value; } //! Destroy the `HLHint` instance. 
- ASMJIT_INLINE ~HLHint() {} + ASMJIT_INLINE ~HLHint() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get variable. - ASMJIT_INLINE VarData* getVd() const { return _vd; } + ASMJIT_INLINE VarData* getVd() const noexcept { return _vd; } //! Get hint it (see `kVarHint)`. - ASMJIT_INLINE uint32_t getHint() const{ return _hint; } + ASMJIT_INLINE uint32_t getHint() const noexcept { return _hint; } //! Set hint it (see `kVarHint)`. - ASMJIT_INLINE void setHint(uint32_t hint) { _hint = hint; } + ASMJIT_INLINE void setHint(uint32_t hint) noexcept { _hint = hint; } //! Get hint value. - ASMJIT_INLINE uint32_t getValue() const { return _value; } + ASMJIT_INLINE uint32_t getValue() const noexcept { return _value; } //! Set hint value. - ASMJIT_INLINE void setValue(uint32_t value) { _value = value; } + ASMJIT_INLINE void setValue(uint32_t value) noexcept { _value = value; } // -------------------------------------------------------------------------- // [Members] @@ -745,7 +745,7 @@ struct HLHint : public HLNode { // [asmjit::HLFunc] // ============================================================================ -//! Function node (HL). +//! Function (HL). struct HLFunc : public HLNode { ASMJIT_NO_COPY(HLFunc) @@ -756,8 +756,8 @@ struct HLFunc : public HLNode { //! Create a new `HLFunc` instance. //! //! Always use `Compiler::addFunc()` to create an `HLFunc` instance. - ASMJIT_INLINE HLFunc(Compiler* compiler) - : HLNode(compiler, kHLNodeTypeFunc), + ASMJIT_INLINE HLFunc(Compiler* compiler) noexcept + : HLNode(compiler, kTypeFunc), _entryNode(nullptr), _exitNode(nullptr), _decl(nullptr), @@ -774,88 +774,98 @@ struct HLFunc : public HLNode { _callStackSize(0) {} //! Destroy the `HLFunc` instance. 
- ASMJIT_INLINE ~HLFunc() {} + ASMJIT_INLINE ~HLFunc() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get function entry `HLLabel`. - ASMJIT_INLINE HLLabel* getEntryNode() const { return _entryNode; } + ASMJIT_INLINE HLLabel* getEntryNode() const noexcept { return _entryNode; } //! Get function exit `HLLabel`. - ASMJIT_INLINE HLLabel* getExitNode() const { return _exitNode; } + ASMJIT_INLINE HLLabel* getExitNode() const noexcept { return _exitNode; } //! Get function entry label. - ASMJIT_INLINE Label getEntryLabel() const { return _entryNode->getLabel(); } + ASMJIT_INLINE Label getEntryLabel() const noexcept { return _entryNode->getLabel(); } //! Get function exit label. - ASMJIT_INLINE Label getExitLabel() const { return _exitNode->getLabel(); } + ASMJIT_INLINE Label getExitLabel() const noexcept { return _exitNode->getLabel(); } //! Get the function end sentinel. - ASMJIT_INLINE HLSentinel* getEnd() const { return _end; } + ASMJIT_INLINE HLSentinel* getEnd() const noexcept { return _end; } //! Get function declaration. - ASMJIT_INLINE FuncDecl* getDecl() const { return _decl; } + ASMJIT_INLINE FuncDecl* getDecl() const noexcept { return _decl; } //! Get arguments count. - ASMJIT_INLINE uint32_t getNumArgs() const { return _decl->getNumArgs(); } + ASMJIT_INLINE uint32_t getNumArgs() const noexcept { return _decl->getNumArgs(); } //! Get arguments list. - ASMJIT_INLINE VarData** getArgs() const { return _args; } + ASMJIT_INLINE VarData** getArgs() const noexcept { return _args; } //! Get argument at `i`. - ASMJIT_INLINE VarData* getArg(uint32_t i) const { + ASMJIT_INLINE VarData* getArg(uint32_t i) const noexcept { ASMJIT_ASSERT(i < getNumArgs()); return _args[i]; } //! Set argument at `i`. 
- ASMJIT_INLINE void setArg(uint32_t i, VarData* vd) { + ASMJIT_INLINE void setArg(uint32_t i, VarData* vd) noexcept { ASMJIT_ASSERT(i < getNumArgs()); _args[i] = vd; } //! Reset argument at `i`. - ASMJIT_INLINE void resetArg(uint32_t i) { + ASMJIT_INLINE void resetArg(uint32_t i) noexcept { ASMJIT_ASSERT(i < getNumArgs()); _args[i] = nullptr; } //! Get function hints. - ASMJIT_INLINE uint32_t getFuncHints() const { return _funcHints; } + ASMJIT_INLINE uint32_t getFuncHints() const noexcept { return _funcHints; } //! Get function flags. - ASMJIT_INLINE uint32_t getFuncFlags() const { return _funcFlags; } + ASMJIT_INLINE uint32_t getFuncFlags() const noexcept { return _funcFlags; } //! Get whether the _funcFlags has `flag` - ASMJIT_INLINE bool hasFuncFlag(uint32_t flag) const { return (_funcFlags & flag) != 0; } + ASMJIT_INLINE bool hasFuncFlag(uint32_t flag) const noexcept { return (_funcFlags & flag) != 0; } //! Set function `flag`. - ASMJIT_INLINE void addFuncFlags(uint32_t flags) { _funcFlags |= flags; } + ASMJIT_INLINE void addFuncFlags(uint32_t flags) noexcept { _funcFlags |= flags; } //! Clear function `flag`. - ASMJIT_INLINE void clearFuncFlags(uint32_t flags) { _funcFlags &= ~flags; } + ASMJIT_INLINE void clearFuncFlags(uint32_t flags) noexcept { _funcFlags &= ~flags; } //! Get whether the function is naked. - ASMJIT_INLINE bool isNaked() const { return hasFuncFlag(kFuncFlagIsNaked); } + ASMJIT_INLINE bool isNaked() const noexcept { return hasFuncFlag(kFuncFlagIsNaked); } //! Get whether the function is also a caller. - ASMJIT_INLINE bool isCaller() const { return hasFuncFlag(kFuncFlagIsCaller); } + ASMJIT_INLINE bool isCaller() const noexcept { return hasFuncFlag(kFuncFlagIsCaller); } //! Get whether the required stack alignment is lower than expected one, //! thus it has to be aligned manually. 
- ASMJIT_INLINE bool isStackMisaligned() const { return hasFuncFlag(kFuncFlagIsStackMisaligned); } + ASMJIT_INLINE bool isStackMisaligned() const noexcept { return hasFuncFlag(kFuncFlagIsStackMisaligned); } //! Get whether the stack pointer is adjusted inside function prolog/epilog. - ASMJIT_INLINE bool isStackAdjusted() const { return hasFuncFlag(kFuncFlagIsStackAdjusted); } + ASMJIT_INLINE bool isStackAdjusted() const noexcept { return hasFuncFlag(kFuncFlagIsStackAdjusted); } //! Get whether the function is finished. - ASMJIT_INLINE bool isFinished() const { return hasFuncFlag(kFuncFlagIsFinished); } + ASMJIT_INLINE bool isFinished() const noexcept { return hasFuncFlag(kFuncFlagIsFinished); } //! Get expected stack alignment. - ASMJIT_INLINE uint32_t getExpectedStackAlignment() const { return _expectedStackAlignment; } + ASMJIT_INLINE uint32_t getExpectedStackAlignment() const noexcept { + return _expectedStackAlignment; + } + //! Set expected stack alignment. - ASMJIT_INLINE void setExpectedStackAlignment(uint32_t alignment) { _expectedStackAlignment = alignment; } + ASMJIT_INLINE void setExpectedStackAlignment(uint32_t alignment) noexcept { + _expectedStackAlignment = alignment; + } //! Get required stack alignment. - ASMJIT_INLINE uint32_t getRequiredStackAlignment() const { return _requiredStackAlignment; } + ASMJIT_INLINE uint32_t getRequiredStackAlignment() const noexcept { + return _requiredStackAlignment; + } + //! Set required stack alignment. - ASMJIT_INLINE void setRequiredStackAlignment(uint32_t alignment) { _requiredStackAlignment = alignment; } + ASMJIT_INLINE void setRequiredStackAlignment(uint32_t alignment) noexcept { + _requiredStackAlignment = alignment; + } //! Update required stack alignment so it's not lower than expected //! stack alignment. 
- ASMJIT_INLINE void updateRequiredStackAlignment() { + ASMJIT_INLINE void updateRequiredStackAlignment() noexcept { if (_requiredStackAlignment <= _expectedStackAlignment) { _requiredStackAlignment = _expectedStackAlignment; clearFuncFlags(kFuncFlagIsStackMisaligned); @@ -866,32 +876,32 @@ struct HLFunc : public HLNode { } //! Set stack "Red Zone" size. - ASMJIT_INLINE uint32_t getRedZoneSize() const { return _redZoneSize; } + ASMJIT_INLINE uint32_t getRedZoneSize() const noexcept { return _redZoneSize; } //! Get stack "Red Zone" size. - ASMJIT_INLINE void setRedZoneSize(uint32_t s) { _redZoneSize = static_cast(s); } + ASMJIT_INLINE void setRedZoneSize(uint32_t s) noexcept { _redZoneSize = static_cast(s); } //! Set stack "Spill Zone" size. - ASMJIT_INLINE uint32_t getSpillZoneSize() const { return _spillZoneSize; } + ASMJIT_INLINE uint32_t getSpillZoneSize() const noexcept { return _spillZoneSize; } //! Get stack "Spill Zone" size. - ASMJIT_INLINE void setSpillZoneSize(uint32_t s) { _spillZoneSize = static_cast(s); } + ASMJIT_INLINE void setSpillZoneSize(uint32_t s) noexcept { _spillZoneSize = static_cast(s); } //! Get stack size used by function arguments. - ASMJIT_INLINE uint32_t getArgStackSize() const { return _argStackSize; } + ASMJIT_INLINE uint32_t getArgStackSize() const noexcept { return _argStackSize; } //! Get stack size used by variables and memory allocated on the stack. - ASMJIT_INLINE uint32_t getMemStackSize() const { return _memStackSize; } + ASMJIT_INLINE uint32_t getMemStackSize() const noexcept { return _memStackSize; } //! Get stack size used by function calls. - ASMJIT_INLINE uint32_t getCallStackSize() const { return _callStackSize; } + ASMJIT_INLINE uint32_t getCallStackSize() const noexcept { return _callStackSize; } //! Merge stack size used by function call with `s`. 
- ASMJIT_INLINE void mergeCallStackSize(uint32_t s) { if (_callStackSize < s) _callStackSize = s; } + ASMJIT_INLINE void mergeCallStackSize(uint32_t s) noexcept { if (_callStackSize < s) _callStackSize = s; } // -------------------------------------------------------------------------- // [Hints] // -------------------------------------------------------------------------- //! Set function hint. - ASMJIT_INLINE void setHint(uint32_t hint, uint32_t value) { + ASMJIT_INLINE void setHint(uint32_t hint, uint32_t value) noexcept { ASMJIT_ASSERT(hint <= 31); ASMJIT_ASSERT(value <= 1); @@ -900,7 +910,7 @@ struct HLFunc : public HLNode { } //! Get function hint. - ASMJIT_INLINE uint32_t getHint(uint32_t hint) const { + ASMJIT_INLINE uint32_t getHint(uint32_t hint) const noexcept { ASMJIT_ASSERT(hint <= 31); return (_funcHints >> hint) & 0x1; } @@ -955,7 +965,7 @@ struct HLFunc : public HLNode { // [asmjit::HLRet] // ============================================================================ -//! Return node (HL). +//! Function return (HL). struct HLRet : public HLNode { ASMJIT_NO_COPY(HLRet) @@ -964,30 +974,30 @@ struct HLRet : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLRet` instance. - ASMJIT_INLINE HLRet(Compiler* compiler, const Operand& o0, const Operand& o1) - : HLNode(compiler, kHLNodeTypeRet) { + ASMJIT_INLINE HLRet(Compiler* compiler, const Operand& o0, const Operand& o1) noexcept + : HLNode(compiler, kTypeRet) { - _flags |= kHLNodeFlagIsRet; + _flags |= kFlagIsRet; _ret[0] = o0; _ret[1] = o1; } //! Destroy the `HLRet` instance. - ASMJIT_INLINE ~HLRet() {} + ASMJIT_INLINE ~HLRet() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get the first return operand. 
- ASMJIT_INLINE Operand& getFirst() { return _ret[0]; } + ASMJIT_INLINE Operand& getFirst() noexcept { return _ret[0]; } //! \overload - ASMJIT_INLINE const Operand& getFirst() const { return _ret[0]; } + ASMJIT_INLINE const Operand& getFirst() const noexcept { return _ret[0]; } //! Get the second return operand. - ASMJIT_INLINE Operand& getSecond() { return _ret[1]; } + ASMJIT_INLINE Operand& getSecond() noexcept { return _ret[1]; } //! \overload - ASMJIT_INLINE const Operand& getSecond() const { return _ret[1]; } + ASMJIT_INLINE const Operand& getSecond() const noexcept { return _ret[1]; } // -------------------------------------------------------------------------- // [Members] @@ -1001,7 +1011,7 @@ struct HLRet : public HLNode { // [asmjit::HLCall] // ============================================================================ -//! Call node (HL). +//! Function call (HL). struct HLCall : public HLNode { ASMJIT_NO_COPY(HLCall) @@ -1010,45 +1020,45 @@ struct HLCall : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLCall` instance. - ASMJIT_INLINE HLCall(Compiler* compiler, const Operand& target) - : HLNode(compiler, kHLNodeTypeCall), + ASMJIT_INLINE HLCall(Compiler* compiler, const Operand& target) noexcept + : HLNode(compiler, kTypeCall), _decl(nullptr), _target(target), _args(nullptr) {} //! Destroy the `HLCall` instance. - ASMJIT_INLINE ~HLCall() {} + ASMJIT_INLINE ~HLCall() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get function declaration. - ASMJIT_INLINE FuncDecl* getDecl() const { return _decl; } + ASMJIT_INLINE FuncDecl* getDecl() const noexcept { return _decl; } //! Get target operand. - ASMJIT_INLINE Operand& getTarget() { return _target; } + ASMJIT_INLINE Operand& getTarget() noexcept { return _target; } //! 
\overload - ASMJIT_INLINE const Operand& getTarget() const { return _target; } + ASMJIT_INLINE const Operand& getTarget() const noexcept { return _target; } //! Get return at `i`. - ASMJIT_INLINE Operand& getRet(uint32_t i = 0) { + ASMJIT_INLINE Operand& getRet(uint32_t i = 0) noexcept { ASMJIT_ASSERT(i < 2); return _ret[i]; } //! \overload - ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const { + ASMJIT_INLINE const Operand& getRet(uint32_t i = 0) const noexcept { ASMJIT_ASSERT(i < 2); return _ret[i]; } //! Get argument at `i`. - ASMJIT_INLINE Operand& getArg(uint32_t i) { + ASMJIT_INLINE Operand& getArg(uint32_t i) noexcept { ASMJIT_ASSERT(i < kFuncArgCountLoHi); return _args[i]; } //! \overload - ASMJIT_INLINE const Operand& getArg(uint32_t i) const { + ASMJIT_INLINE const Operand& getArg(uint32_t i) const noexcept { ASMJIT_ASSERT(i < kFuncArgCountLoHi); return _args[i]; } @@ -1072,7 +1082,7 @@ struct HLCall : public HLNode { // [asmjit::HLCallArg] // ============================================================================ -//! Function call's argument node (HL). +//! Function call's argument (HL). struct HLCallArg : public HLNode { ASMJIT_NO_COPY(HLCallArg) @@ -1081,26 +1091,26 @@ struct HLCallArg : public HLNode { // -------------------------------------------------------------------------- //! Create a new `HLCallArg` instance. - ASMJIT_INLINE HLCallArg(Compiler* compiler, HLCall* call, VarData* sVd, VarData* cVd) - : HLNode(compiler, kHLNodeTypeCallArg), + ASMJIT_INLINE HLCallArg(Compiler* compiler, HLCall* call, VarData* sVd, VarData* cVd) noexcept + : HLNode(compiler, kTypeCallArg), _call(call), _sVd(sVd), _cVd(cVd), _args(0) {} //! Destroy the `HLCallArg` instance. - ASMJIT_INLINE ~HLCallArg() {} + ASMJIT_INLINE ~HLCallArg() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get the associated function-call. 
- ASMJIT_INLINE HLCall* getCall() const { return _call; } + ASMJIT_INLINE HLCall* getCall() const noexcept { return _call; } //! Get source variable. - ASMJIT_INLINE VarData* getSVd() const { return _sVd; } + ASMJIT_INLINE VarData* getSVd() const noexcept { return _sVd; } //! Get conversion variable. - ASMJIT_INLINE VarData* getCVd() const { return _cVd; } + ASMJIT_INLINE VarData* getCVd() const noexcept { return _cVd; } // -------------------------------------------------------------------------- // [Members] @@ -1118,10 +1128,10 @@ struct HLCallArg : public HLNode { }; // ============================================================================ -// [asmjit::Stream] +// [asmjit::HLStream] // ============================================================================ - +// TODO: //! \} diff --git a/src/asmjit/base/logger.cpp b/src/asmjit/base/logger.cpp index 18975e8..69d68f0 100644 --- a/src/asmjit/base/logger.cpp +++ b/src/asmjit/base/logger.cpp @@ -28,7 +28,7 @@ namespace asmjit { // [asmjit::LogUtil] // ============================================================================ -bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) { +bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept { size_t currentLen = sb.getLength(); size_t commentLen = comment ? 
Utils::strLen(comment, kMaxCommentLength) : 0; @@ -82,18 +82,18 @@ bool LogUtil::formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLe // [asmjit::Logger - Construction / Destruction] // ============================================================================ -Logger::Logger() { +Logger::Logger() noexcept { _options = 0; ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation)); } -Logger::~Logger() {} +Logger::~Logger() noexcept {} // ============================================================================ // [asmjit::Logger - Logging] // ============================================================================ -void Logger::logFormat(uint32_t style, const char* fmt, ...) { +void Logger::logFormat(uint32_t style, const char* fmt, ...) noexcept { char buf[1024]; size_t len; @@ -108,7 +108,7 @@ void Logger::logFormat(uint32_t style, const char* fmt, ...) { logString(style, buf, len); } -void Logger::logBinary(uint32_t style, const void* data, size_t size) { +void Logger::logBinary(uint32_t style, const void* data, size_t size) noexcept { static const char prefix[] = ".data "; static const char hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; @@ -138,27 +138,11 @@ void Logger::logBinary(uint32_t style, const void* data, size_t size) { } } -// ============================================================================ -// [asmjit::Logger - LogBinary] -// ============================================================================ - -void Logger::setOption(uint32_t id, bool value) { - if (id >= kLoggerOptionCount) - return; - - uint32_t mask = 1 << id; - - if (value) - _options |= mask; - else - _options &= ~mask; -} - // ============================================================================ // [asmjit::Logger - Indentation] // ============================================================================ -void Logger::setIndentation(const char* indentation) { +void Logger::setIndentation(const 
char* indentation) noexcept { ::memset(_indentation, 0, ASMJIT_ARRAY_SIZE(_indentation)); if (!indentation) return; @@ -171,17 +155,14 @@ void Logger::setIndentation(const char* indentation) { // [asmjit::FileLogger - Construction / Destruction] // ============================================================================ -FileLogger::FileLogger(FILE* stream) : _stream(nullptr) { - setStream(stream); -} - -FileLogger::~FileLogger() {} +FileLogger::FileLogger(FILE* stream) noexcept : _stream(nullptr) { setStream(stream); } +FileLogger::~FileLogger() noexcept {} // ============================================================================ // [asmjit::FileLogger - Logging] // ============================================================================ -void FileLogger::logString(uint32_t style, const char* buf, size_t len) { +void FileLogger::logString(uint32_t style, const char* buf, size_t len) noexcept { if (!_stream) return; @@ -195,14 +176,14 @@ void FileLogger::logString(uint32_t style, const char* buf, size_t len) { // [asmjit::StringLogger - Construction / Destruction] // ============================================================================ -StringLogger::StringLogger() {} -StringLogger::~StringLogger() {} +StringLogger::StringLogger() noexcept {} +StringLogger::~StringLogger() noexcept {} // ============================================================================ // [asmjit::StringLogger - Logging] // ============================================================================ -void StringLogger::logString(uint32_t style, const char* buf, size_t len) { +void StringLogger::logString(uint32_t style, const char* buf, size_t len) noexcept { _stringBuilder.appendString(buf, len); } diff --git a/src/asmjit/base/logger.h b/src/asmjit/base/logger.h index 28ea0a4..267846c 100644 --- a/src/asmjit/base/logger.h +++ b/src/asmjit/base/logger.h @@ -9,7 +9,6 @@ #define _ASMJIT_BASE_LOGGER_H #include "../build.h" -#if !defined(ASMJIT_DISABLE_LOGGER) // 
[Dependencies - AsmJit] #include "../base/containers.h" @@ -25,38 +24,7 @@ namespace asmjit { //! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::LoggerOption] -// ============================================================================ - -//! Logger options. -ASMJIT_ENUM(LoggerOption) { - //! Whether to output instructions also in binary form. - kLoggerOptionBinaryForm = 0, - - //! Whether to output immediates as hexadecimal numbers. - kLoggerOptionHexImmediate = 1, - //! Whether to output displacements as hexadecimal numbers. - kLoggerOptionHexDisplacement = 2, - - //! Count of logger options. - kLoggerOptionCount = 3 -}; - -// ============================================================================ -// [asmjit::LoggerStyle] -// ============================================================================ - -//! Logger style. -ASMJIT_ENUM(LoggerStyle) { - kLoggerStyleDefault = 0, - kLoggerStyleDirective = 1, - kLoggerStyleLabel = 2, - kLoggerStyleData = 3, - kLoggerStyleComment = 4, - - kLoggerStyleCount = 5 -}; +#if !defined(ASMJIT_DISABLE_LOGGER) // ============================================================================ // [asmjit::LogUtil] @@ -73,7 +41,9 @@ struct LogUtil { kMaxBinaryLength = 26 }; - static bool formatLine(StringBuilder& sb, const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment); + static bool formatLine( + StringBuilder& sb, + const uint8_t* binData, size_t binLen, size_t dispLen, size_t imLen, const char* comment) noexcept; }; #endif // ASMJIT_EXPORTS @@ -92,59 +62,88 @@ struct LogUtil { struct ASMJIT_VIRTAPI Logger { ASMJIT_NO_COPY(Logger) + // -------------------------------------------------------------------------- + // [Options] + // -------------------------------------------------------------------------- + + //! Logger options. + ASMJIT_ENUM(Options) { + //! Whether to output instructions also in binary form. 
+ kOptionBinaryForm = 0, + + //! Whether to output immediates as hexadecimal numbers. + kOptionHexImmediate = 1, + //! Whether to output displacements as hexadecimal numbers. + kOptionHexDisplacement = 2, + + //! Count of logger options. + kOptionCount = 3 + }; + + // -------------------------------------------------------------------------- + // [Style] + // -------------------------------------------------------------------------- + + //! Logger style. + ASMJIT_ENUM(Style) { + kStyleDefault = 0, + kStyleDirective = 1, + kStyleLabel = 2, + kStyleData = 3, + kStyleComment = 4, + + kStyleCount = 5 + }; + // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- //! Create a `Logger` instance. - ASMJIT_API Logger(); + ASMJIT_API Logger() noexcept; //! Destroy the `Logger` instance. - ASMJIT_API virtual ~Logger(); + ASMJIT_API virtual ~Logger() noexcept; // -------------------------------------------------------------------------- // [Logging] // -------------------------------------------------------------------------- //! Log output. - virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) = 0; + virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept = 0; //! Log formatter message (like sprintf) sending output to `logString()` method. - ASMJIT_API void logFormat(uint32_t style, const char* fmt, ...); + ASMJIT_API void logFormat(uint32_t style, const char* fmt, ...) noexcept; //! Log binary data. - ASMJIT_API void logBinary(uint32_t style, const void* data, size_t size); + ASMJIT_API void logBinary(uint32_t style, const void* data, size_t size) noexcept; // -------------------------------------------------------------------------- // [Options] // -------------------------------------------------------------------------- //! Get all logger options as a single integer. 
- ASMJIT_INLINE uint32_t getOptions() const { - return _options; - } + ASMJIT_INLINE uint32_t getOptions() const noexcept { return _options; } //! Get the given logger option. - ASMJIT_INLINE bool getOption(uint32_t id) const { - ASMJIT_ASSERT(id < kLoggerOptionCount); - return static_cast((_options >> id) & 0x1); + ASMJIT_INLINE bool hasOption(uint32_t option) const noexcept { + return (_options & option) != 0; } - - //! Set the given logger option. - ASMJIT_API void setOption(uint32_t id, bool value); + ASMJIT_INLINE void addOptions(uint32_t options) noexcept { _options |= options; } + ASMJIT_INLINE void clearOptions(uint32_t options) noexcept { _options &= ~options; } // -------------------------------------------------------------------------- // [Indentation] // -------------------------------------------------------------------------- //! Get indentation. - ASMJIT_INLINE const char* getIndentation() const { + ASMJIT_INLINE const char* getIndentation() const noexcept { return _indentation; } //! Set indentation. - ASMJIT_API void setIndentation(const char* indentation); + ASMJIT_API void setIndentation(const char* indentation) noexcept; //! Reset indentation. - ASMJIT_INLINE void resetIndentation() { + ASMJIT_INLINE void resetIndentation() noexcept { setIndentation(nullptr); } @@ -172,10 +171,10 @@ struct ASMJIT_VIRTAPI FileLogger : public Logger { // -------------------------------------------------------------------------- //! Create a new `FileLogger` that logs to a `FILE` stream. - ASMJIT_API FileLogger(FILE* stream = nullptr); + ASMJIT_API FileLogger(FILE* stream = nullptr) noexcept; //! Destroy the `FileLogger`. - ASMJIT_API virtual ~FileLogger(); + ASMJIT_API virtual ~FileLogger() noexcept; // -------------------------------------------------------------------------- // [Accessors] @@ -184,13 +183,13 @@ struct ASMJIT_VIRTAPI FileLogger : public Logger { //! Get `FILE*` stream. //! //! \note Return value can be `nullptr`. 
- ASMJIT_INLINE FILE* getStream() const { + ASMJIT_INLINE FILE* getStream() const noexcept { return _stream; } //! Set `FILE*` stream, can be set to `nullptr` to disable logging, although - //! the `CodeGen` will still call `logString` even if there is no stream. - ASMJIT_INLINE void setStream(FILE* stream) { + //! the `ExternalTool` will still call `logString` even if there is no stream. + ASMJIT_INLINE void setStream(FILE* stream) noexcept { _stream = stream; } @@ -198,7 +197,7 @@ struct ASMJIT_VIRTAPI FileLogger : public Logger { // [Logging] // -------------------------------------------------------------------------- - ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex); + ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept; // -------------------------------------------------------------------------- // [Members] @@ -221,10 +220,10 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger { // -------------------------------------------------------------------------- //! Create new `StringLogger`. - ASMJIT_API StringLogger(); + ASMJIT_API StringLogger() noexcept; //! Destroy the `StringLogger`. - ASMJIT_API virtual ~StringLogger(); + ASMJIT_API virtual ~StringLogger() noexcept; // -------------------------------------------------------------------------- // [Accessors] @@ -233,17 +232,17 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger { //! Get `char*` pointer which represents the resulting string. //! //! The pointer is owned by `StringLogger`, it can't be modified or freed. - ASMJIT_INLINE const char* getString() const { + ASMJIT_INLINE const char* getString() const noexcept { return _stringBuilder.getData(); } //! Get the length of the string returned by `getString()`. - ASMJIT_INLINE size_t getLength() const { + ASMJIT_INLINE size_t getLength() const noexcept { return _stringBuilder.getLength(); } //! Clear the resulting string. 
- ASMJIT_INLINE void clearString() { + ASMJIT_INLINE void clearString() noexcept { _stringBuilder.clear(); } @@ -251,7 +250,7 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger { // [Logging] // -------------------------------------------------------------------------- - ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex); + ASMJIT_API virtual void logString(uint32_t style, const char* buf, size_t len = kInvalidIndex) noexcept; // -------------------------------------------------------------------------- // [Members] @@ -260,6 +259,9 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger { //! Output. StringBuilder _stringBuilder; }; +#else +struct Logger; +#endif // !ASMJIT_DISABLE_LOGGER //! \} @@ -269,5 +271,4 @@ struct ASMJIT_VIRTAPI StringLogger : public Logger { #include "../apiend.h" // [Guard] -#endif // !ASMJIT_DISABLE_LOGGER #endif // _ASMJIT_BASE_LOGGER_H diff --git a/src/asmjit/base/operand.h b/src/asmjit/base/operand.h index 296de77..5370543 100644 --- a/src/asmjit/base/operand.h +++ b/src/asmjit/base/operand.h @@ -26,38 +26,6 @@ struct Compiler; //! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::OperandType] -// ============================================================================ - -//! Operand types that can be encoded in `Operand`. -ASMJIT_ENUM(OperandType) { - //! Invalid operand, used only internally (not initialized Operand). - kOperandTypeNone = 0, - //! Operand is a register. - kOperandTypeReg = 1, - //! Operand is a variable. - kOperandTypeVar = 2, - //! Operand is a memory. - kOperandTypeMem = 3, - //! Operand is an immediate value. - kOperandTypeImm = 4, - //! Operand is a label. - kOperandTypeLabel = 5 -}; - -// ============================================================================ -// [asmjit::OperandId] -// ============================================================================ - -//! 
Operand id masks used to determine the operand type. -ASMJIT_ENUM(OperandId) { - //! Operand id refers to `Var`. - kOperandIdVar = 0x80000000U, - //! Operand id to real index mask. - kOperandIdNum = 0x7FFFFFFFU -}; - // ============================================================================ // [asmjit::RegClass] // ============================================================================ @@ -72,7 +40,7 @@ ASMJIT_ENUM(RegClass) { // [asmjit::SizeDefs] // ============================================================================ -//! Common size of registers and pointers. +//! Common sizes of registers and data elements. ASMJIT_ENUM(SizeDefs) { //! 1 byte size (BYTE). kSizeByte = 1, @@ -87,7 +55,9 @@ ASMJIT_ENUM(SizeDefs) { //! 16 bytes size (OWORD / DQWORD). kSizeOWord = 16, //! 32 bytes size (YWORD / QQWORD). - kSizeYWord = 32 + kSizeYWord = 32, + //! 64 bytes size (ZWORD / DQQWORD). + kSizeZWord = 64 }; // ============================================================================ @@ -174,6 +144,40 @@ ASMJIT_ENUM(VarType) { //! Operand can contain register, memory location, immediate, or label. struct Operand { + // -------------------------------------------------------------------------- + // [Type] + // -------------------------------------------------------------------------- + + //! Operand types that can be encoded in \ref Operand. + ASMJIT_ENUM(Type) { + //! Invalid operand, used only internally (not initialized Operand). + kTypeNone = 0, + //! Operand is a register. + kTypeReg = 1, + //! Operand is a variable. + kTypeVar = 2, + //! Operand is a memory. + kTypeMem = 3, + //! Operand is an immediate value. + kTypeImm = 4, + //! Operand is a label. + kTypeLabel = 5 + }; + + // -------------------------------------------------------------------------- + // [Id] + // -------------------------------------------------------------------------- + + //! Operand ID masks used to determine the operand type. + ASMJIT_ENUM(IdTag) { + //! 
Operand id refers to a variable (\ref Var). + kIdVarTag = 0x80000000U, + //! Operand id refers to a label (\ref Label). + kIdLabelTag = 0x00000000U, + //! Valid bits stored in operand ID (for extracting array index from ID). + kIdIndexMask = 0x7FFFFFFFU + }; + // -------------------------------------------------------------------------- // [Structs] // -------------------------------------------------------------------------- @@ -182,9 +186,9 @@ struct Operand { //! //! Base operand data. struct BaseOp { - //! Type of operand, see \ref OperandType. + //! Type of the operand (see \ref Type). uint8_t op; - //! Size of operand (register, address, immediate, or variable). + //! Size of the operand (register, address, immediate, or variable). uint8_t size; //! \internal uint8_t reserved_2_1; @@ -206,9 +210,9 @@ struct Operand { //! //! Register or Variable operand data. struct VRegOp { - //! Type of operand, `kOperandTypeReg`. + //! Type of the operand (\ref kTypeReg or \ref kTypeVar). uint8_t op; - //! Size of register or variable. + //! Size of the operand (register or variable). uint8_t size; union { @@ -254,9 +258,9 @@ struct Operand { //! //! Memory or Variable operand data. struct VMemOp { - //! Type of operand, `kOperandTypeMem`. + //! Type of the operand (\ref kTypeMem). uint8_t op; - //! Size of the pointer in bytes. + //! Size of the memory in bytes or zero. uint8_t size; //! Type of the memory operand, see `MemType`. uint8_t type; @@ -277,9 +281,9 @@ struct Operand { //! //! Immediate operand data. struct ImmOp { - //! Type of operand, `kOperandTypeImm`. + //! Type of the operand (\ref kTypeImm). uint8_t op; - //! Size of immediate (or 0 to autodetect). + //! Size of the immediate (or 0 to autodetect). uint8_t size; //! \internal uint8_t reserved_2_1; @@ -321,9 +325,9 @@ struct Operand { //! //! Label operand data. struct LabelOp { - //! Type of operand, `kOperandTypeLabel`. + //! Type of the operand (\ref kTypeLabel). uint8_t op; - //! 
Always zero, labels don't have size. + //! Always zero. uint8_t size; //! \internal uint8_t reserved_2_1; @@ -345,27 +349,27 @@ struct Operand { // -------------------------------------------------------------------------- //! Create an uninitialized operand. - ASMJIT_INLINE Operand() { + ASMJIT_INLINE Operand() noexcept { reset(); } //! Create a reference to `other` operand. - ASMJIT_INLINE Operand(const Operand& other) { + ASMJIT_INLINE Operand(const Operand& other) noexcept { _init(other); } - explicit ASMJIT_INLINE Operand(const _NoInit&) {} + explicit ASMJIT_INLINE Operand(const _NoInit&) noexcept {} // -------------------------------------------------------------------------- // [Base] // -------------------------------------------------------------------------- //! Clone the `Operand`. - ASMJIT_INLINE Operand clone() const { return Operand(*this); } + ASMJIT_INLINE Operand clone() const noexcept { return Operand(*this); } //! Reset the `Operand`. - ASMJIT_INLINE void reset() { - _init_packed_op_sz_b0_b1_id(kOperandTypeNone, 0, 0, 0, kInvalidValue); + ASMJIT_INLINE void reset() noexcept { + _init_packed_op_sz_b0_b1_id(kTypeNone, 0, 0, 0, kInvalidValue); _init_packed_d2_d3(0, 0); } @@ -376,11 +380,11 @@ struct Operand { //! \internal //! //! Initialize operand to `other` (used by constructors). - ASMJIT_INLINE void _init(const Operand& other) { + ASMJIT_INLINE void _init(const Operand& other) noexcept { ::memcpy(this, &other, sizeof(Operand)); } - ASMJIT_INLINE void _init_packed_op_sz_b0_b1_id(uint32_t op, uint32_t sz, uint32_t r0, uint32_t r1, uint32_t id) { + ASMJIT_INLINE void _init_packed_op_sz_b0_b1_id(uint32_t op, uint32_t sz, uint32_t r0, uint32_t r1, uint32_t id) noexcept { // This hack is not for performance, but to decrease the size of the binary // generated when constructing AsmJit operands (mostly for third parties). 
// Some compilers are not able to join four BYTE writes to a single DWORD @@ -390,22 +394,22 @@ struct Operand { _packed[0].setPacked_2x32(Utils::pack32_4x8(op, sz, r0, r1), id); } - ASMJIT_INLINE void _init_packed_op_sz_w0_id(uint32_t op, uint32_t sz, uint32_t w0, uint32_t id) { + ASMJIT_INLINE void _init_packed_op_sz_w0_id(uint32_t op, uint32_t sz, uint32_t w0, uint32_t id) noexcept { _packed[0].setPacked_2x32(Utils::pack32_2x8_1x16(op, sz, w0), id); } - ASMJIT_INLINE void _init_packed_d0_d1(uint32_t u0, uint32_t u1) { + ASMJIT_INLINE void _init_packed_d0_d1(uint32_t u0, uint32_t u1) noexcept { _packed[0].setPacked_2x32(u0, u1); } - ASMJIT_INLINE void _init_packed_d2_d3(uint32_t u2, uint32_t u3) { + ASMJIT_INLINE void _init_packed_d2_d3(uint32_t u2, uint32_t u3) noexcept { _packed[1].setPacked_2x32(u2, u3); } //! \internal //! //! Initialize operand to `other` (used by assign operators). - ASMJIT_INLINE void _copy(const Operand& other) { + ASMJIT_INLINE void _copy(const Operand& other) noexcept { ::memcpy(this, &other, sizeof(Operand)); } @@ -414,61 +418,65 @@ struct Operand { // -------------------------------------------------------------------------- template - ASMJIT_INLINE T& getData() { return reinterpret_cast(_base); } + ASMJIT_INLINE T& getData() noexcept { + return reinterpret_cast(_base); + } template - ASMJIT_INLINE const T& getData() const { return reinterpret_cast(_base); } + ASMJIT_INLINE const T& getData() const noexcept { + return reinterpret_cast(_base); + } // -------------------------------------------------------------------------- // [Type] // -------------------------------------------------------------------------- - //! Get type of the operand, see \ref OperandType. - ASMJIT_INLINE uint32_t getOp() const { return _base.op; } + //! Get type of the operand, see \ref Type. + ASMJIT_INLINE uint32_t getOp() const noexcept { return _base.op; } - //! Get whether the operand is none - `kOperandTypeNone`. 
- ASMJIT_INLINE bool isNone() const { return (_base.op == kOperandTypeNone); } - //! Get whether the operand is a register - `kOperandTypeReg`. - ASMJIT_INLINE bool isReg() const { return (_base.op == kOperandTypeReg); } - //! Get whether the operand is a variable - `kOperandTypeVar`. - ASMJIT_INLINE bool isVar() const { return (_base.op == kOperandTypeVar); } - //! Get whether the operand is a memory address - `kOperandTypeMem`. - ASMJIT_INLINE bool isMem() const { return (_base.op == kOperandTypeMem); } - //! Get whether the operand is an immediate value - `kOperandTypeImm`. - ASMJIT_INLINE bool isImm() const { return (_base.op == kOperandTypeImm); } - //! Get whether the operand is a label - `kOperandTypeLabel`. - ASMJIT_INLINE bool isLabel() const { return (_base.op == kOperandTypeLabel); } + //! Get whether the operand is none (\ref kTypeNone). + ASMJIT_INLINE bool isNone() const noexcept { return (_base.op == kTypeNone); } + //! Get whether the operand is a register (\ref kTypeReg). + ASMJIT_INLINE bool isReg() const noexcept { return (_base.op == kTypeReg); } + //! Get whether the operand is a variable (\ref kTypeVar). + ASMJIT_INLINE bool isVar() const noexcept { return (_base.op == kTypeVar); } + //! Get whether the operand is a memory location (\ref kTypeMem). + ASMJIT_INLINE bool isMem() const noexcept { return (_base.op == kTypeMem); } + //! Get whether the operand is an immediate (\ref kTypeImm). + ASMJIT_INLINE bool isImm() const noexcept { return (_base.op == kTypeImm); } + //! Get whether the operand is a label (\ref kTypeLabel). + ASMJIT_INLINE bool isLabel() const noexcept { return (_base.op == kTypeLabel); } // -------------------------------------------------------------------------- // [Type - Combined] // -------------------------------------------------------------------------- //! Get register type. 
- ASMJIT_INLINE uint32_t getRegType() const { return _vreg.type; } + ASMJIT_INLINE uint32_t getRegType() const noexcept { return _vreg.type; } //! Get register index. - ASMJIT_INLINE uint32_t getRegIndex() const { return _vreg.index; } + ASMJIT_INLINE uint32_t getRegIndex() const noexcept { return _vreg.index; } //! Get whether the operand is register of `type`. - ASMJIT_INLINE bool isRegType(uint32_t type) const { - return (_packed[0].u32[0] & Utils::pack32_2x8_1x16(0xFF, 0, 0xFF00)) == Utils::pack32_2x8_1x16(kOperandTypeReg, 0, (type << 8)); + ASMJIT_INLINE bool isRegType(uint32_t type) const noexcept { + return (_packed[0].u32[0] & Utils::pack32_2x8_1x16(0xFF, 0, 0xFF00)) == Utils::pack32_2x8_1x16(kTypeReg, 0, (type << 8)); } //! Get whether the operand is register and of `type` and `index`. - ASMJIT_INLINE bool isRegCode(uint32_t type, uint32_t index) const { - return (_packed[0].u32[0] & Utils::pack32_2x8_1x16(0xFF, 0, 0xFFFF)) == Utils::pack32_2x8_1x16(kOperandTypeReg, 0, (type << 8) + index); + ASMJIT_INLINE bool isRegCode(uint32_t type, uint32_t index) const noexcept { + return (_packed[0].u32[0] & Utils::pack32_2x8_1x16(0xFF, 0, 0xFFFF)) == Utils::pack32_2x8_1x16(kTypeReg, 0, (type << 8) + index); } //! Get whether the operand is a register or memory. - ASMJIT_INLINE bool isRegOrMem() const { - ASMJIT_ASSERT(kOperandTypeReg == 1); - ASMJIT_ASSERT(kOperandTypeMem == 3); + ASMJIT_INLINE bool isRegOrMem() const noexcept { + ASMJIT_ASSERT(kTypeReg == 1); + ASMJIT_ASSERT(kTypeMem == 3); return (static_cast(_base.op) | 0x2U) == 0x3U; } //! Get whether the operand is variable or memory. 
- ASMJIT_INLINE bool isVarOrMem() const { - ASMJIT_ASSERT(kOperandTypeVar == 2); - ASMJIT_ASSERT(kOperandTypeMem == 3); + ASMJIT_INLINE bool isVarOrMem() const noexcept { + ASMJIT_ASSERT(kTypeVar == 2); + ASMJIT_ASSERT(kTypeMem == 3); return (static_cast(_base.op) - 2U) <= 1; } @@ -477,7 +485,7 @@ struct Operand { // -------------------------------------------------------------------------- //! Get size of the operand in bytes. - ASMJIT_INLINE uint32_t getSize() const { return _base.size; } + ASMJIT_INLINE uint32_t getSize() const noexcept { return _base.size; } // -------------------------------------------------------------------------- // [Id] @@ -489,7 +497,7 @@ struct Operand { //! //! There is no way to change or remove operand id. Unneeded operands can be //! simply reassigned by `operator=`. - ASMJIT_INLINE uint32_t getId() const { return _base.id; } + ASMJIT_INLINE uint32_t getId() const noexcept { return _base.id; } // -------------------------------------------------------------------------- // [Members] @@ -519,18 +527,18 @@ struct Operand { //! Operand utilities. struct OperandUtil { //! Make variable id. - static ASMJIT_INLINE uint32_t makeVarId(uint32_t id) { - return id | kOperandIdVar; + static ASMJIT_INLINE uint32_t makeVarId(uint32_t id) noexcept { + return id | Operand::kIdVarTag; } //! Make label id. - static ASMJIT_INLINE uint32_t makeLabelId(uint32_t id) { - return id; + static ASMJIT_INLINE uint32_t makeLabelId(uint32_t id) noexcept { + return id | Operand::kIdLabelTag; } //! Strip variable id bit so it becomes a pure index to `VarData[]` array. - static ASMJIT_INLINE uint32_t stripVarId(uint32_t id) { - return id & 0x7FFFFFFFU; + static ASMJIT_INLINE uint32_t stripVarId(uint32_t id) noexcept { + return id & Operand::kIdIndexMask; } //! Get whether the id refers to `Var`. @@ -538,14 +546,14 @@ struct OperandUtil { //! \note The function will never return `true` if the id is `kInvalidValue`. //! 
The trick is to compare a given id to -1 (kInvalidValue) so we check both //! using only one comparison. - static ASMJIT_INLINE bool isVarId(uint32_t id) { + static ASMJIT_INLINE bool isVarId(uint32_t id) noexcept { return static_cast(id) < -1; } //! Get whether the id refers to `Label`. //! //! \note The function will never return `true` if the id is `kInvalidValue`. - static ASMJIT_INLINE bool isLabelId(uint32_t id) { + static ASMJIT_INLINE bool isLabelId(uint32_t id) noexcept { return static_cast(id) >= 0; } }; @@ -561,106 +569,113 @@ struct Reg : public Operand { // -------------------------------------------------------------------------- //! Create a dummy base register. - ASMJIT_INLINE Reg() : Operand(NoInit) { - _init_packed_op_sz_w0_id(kOperandTypeReg, 0, (kInvalidReg << 8) + kInvalidReg, kInvalidValue); + ASMJIT_INLINE Reg() noexcept : Operand(NoInit) { + _init_packed_op_sz_w0_id(kTypeReg, 0, (kInvalidReg << 8) + kInvalidReg, kInvalidValue); _init_packed_d2_d3(kInvalidVar, 0); } //! Create a new base register. - ASMJIT_INLINE Reg(uint32_t type, uint32_t index, uint32_t size) : Operand(NoInit) { - _init_packed_op_sz_w0_id(kOperandTypeReg, size, (type << 8) + index, kInvalidValue); + ASMJIT_INLINE Reg(uint32_t type, uint32_t index, uint32_t size) noexcept : Operand(NoInit) { + _init_packed_op_sz_w0_id(kTypeReg, size, (type << 8) + index, kInvalidValue); _init_packed_d2_d3(kInvalidVar, 0); } //! Create a new reference to `other`. - ASMJIT_INLINE Reg(const Reg& other) : Operand(other) {} + ASMJIT_INLINE Reg(const Reg& other) noexcept : Operand(other) {} //! Create a new reference to `other` and change the index to `index`. 
- ASMJIT_INLINE Reg(const Reg& other, uint32_t index) : Operand(other) { + ASMJIT_INLINE Reg(const Reg& other, uint32_t index) noexcept : Operand(other) { _vreg.index = static_cast(index); } - explicit ASMJIT_INLINE Reg(const _NoInit&) : Operand(NoInit) {} + explicit ASMJIT_INLINE Reg(const _NoInit&) noexcept : Operand(NoInit) {} // -------------------------------------------------------------------------- // [Reg Specific] // -------------------------------------------------------------------------- //! Clone `Reg` operand. - ASMJIT_INLINE Reg clone() const { + ASMJIT_INLINE Reg clone() const noexcept { return Reg(*this); } //! Get whether register code is equal to `type`. - ASMJIT_INLINE bool isRegType(uint32_t type) const { + ASMJIT_INLINE bool isRegType(uint32_t type) const noexcept { return _vreg.type == type; } //! Get whether register code is equal to `type`. - ASMJIT_INLINE bool isRegCode(uint32_t code) const { + ASMJIT_INLINE bool isRegCode(uint32_t code) const noexcept { return _vreg.code == code; } //! Get whether register code is equal to `type`. - ASMJIT_INLINE bool isRegCode(uint32_t type, uint32_t index) const { + ASMJIT_INLINE bool isRegCode(uint32_t type, uint32_t index) const noexcept { return _vreg.code == (type << 8) + index; } //! Get register code that equals to '(type << 8) + index'. - ASMJIT_INLINE uint32_t getRegCode() const { + ASMJIT_INLINE uint32_t getRegCode() const noexcept { return _vreg.code; } //! Get register type. - ASMJIT_INLINE uint32_t getRegType() const { + ASMJIT_INLINE uint32_t getRegType() const noexcept { return _vreg.type; } //! Get register index. - ASMJIT_INLINE uint32_t getRegIndex() const { + ASMJIT_INLINE uint32_t getRegIndex() const noexcept { return _vreg.index; } #define ASMJIT_REG_OP(_Type_) \ - ASMJIT_INLINE _Type_ clone() const { \ + ASMJIT_INLINE _Type_ clone() const ASMJIT_NOEXCEPT { \ return _Type_(*this); \ } \ \ /*! Set register `size`. 
*/ \ - ASMJIT_INLINE _Type_& setSize(uint32_t size) { \ + ASMJIT_INLINE _Type_& setSize(uint32_t size) ASMJIT_NOEXCEPT { \ _vreg.size = static_cast(size); \ return *this; \ } \ \ /*! Set register `code`. */ \ - ASMJIT_INLINE _Type_& setCode(uint32_t code) { \ + ASMJIT_INLINE _Type_& setCode(uint32_t code) ASMJIT_NOEXCEPT { \ _vreg.code = static_cast(code); \ return *this; \ } \ \ /*! Set register `type` and `index`. */ \ - ASMJIT_INLINE _Type_& setCode(uint32_t type, uint32_t index) { \ + ASMJIT_INLINE _Type_& setCode(uint32_t type, uint32_t index) ASMJIT_NOEXCEPT { \ _vreg.type = static_cast(type); \ _vreg.index = static_cast(index); \ return *this; \ } \ \ /*! Set register `type`. */ \ - ASMJIT_INLINE _Type_& setType(uint32_t type) { \ + ASMJIT_INLINE _Type_& setType(uint32_t type) ASMJIT_NOEXCEPT { \ _vreg.type = static_cast(type); \ return *this; \ } \ \ /*! Set register `index`. */ \ - ASMJIT_INLINE _Type_& setIndex(uint32_t index) { \ + ASMJIT_INLINE _Type_& setIndex(uint32_t index) ASMJIT_NOEXCEPT { \ _vreg.index = static_cast(index); \ return *this; \ } \ \ - ASMJIT_INLINE _Type_& operator=(const _Type_& other) { _copy(other); return *this; } \ + ASMJIT_INLINE _Type_& operator=(const _Type_& other) ASMJIT_NOEXCEPT { \ + _copy(other); return *this; \ + } \ \ - ASMJIT_INLINE bool operator==(const _Type_& other) const { return _packed[0].u32[0] == other._packed[0].u32[0]; } \ - ASMJIT_INLINE bool operator!=(const _Type_& other) const { return !operator==(other); } + ASMJIT_INLINE bool operator==(const _Type_& other) const ASMJIT_NOEXCEPT { \ + return _packed[0].u32[0] == other._packed[0].u32[0]; \ + } \ + \ + ASMJIT_INLINE bool operator!=(const _Type_& other) const ASMJIT_NOEXCEPT { \ + return !operator==(other); \ + } }; // ============================================================================ @@ -673,62 +688,62 @@ struct BaseMem : public Operand { // [Construction / Destruction] // 
-------------------------------------------------------------------------- - ASMJIT_INLINE BaseMem() : Operand(NoInit) { + ASMJIT_INLINE BaseMem() noexcept : Operand(NoInit) { reset(); } - ASMJIT_INLINE BaseMem(const BaseMem& other) : Operand(other) {} - explicit ASMJIT_INLINE BaseMem(const _NoInit&) : Operand(NoInit) {} + ASMJIT_INLINE BaseMem(const BaseMem& other) noexcept : Operand(other) {} + explicit ASMJIT_INLINE BaseMem(const _NoInit&) noexcept : Operand(NoInit) {} // -------------------------------------------------------------------------- // [BaseMem Specific] // -------------------------------------------------------------------------- //! Clone `BaseMem` operand. - ASMJIT_INLINE BaseMem clone() const { + ASMJIT_INLINE BaseMem clone() const noexcept { return BaseMem(*this); } //! Reset `BaseMem` operand. - ASMJIT_INLINE void reset() { - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, 0, kMemTypeBaseIndex, 0, kInvalidValue); + ASMJIT_INLINE void reset() noexcept { + _init_packed_op_sz_b0_b1_id(kTypeMem, 0, kMemTypeBaseIndex, 0, kInvalidValue); _init_packed_d2_d3(kInvalidValue, 0); } //! Get the type of the memory operand, see `MemType`. - ASMJIT_INLINE uint32_t getMemType() const { + ASMJIT_INLINE uint32_t getMemType() const noexcept { return _vmem.type; } //! Get whether the type of the memory operand is either `kMemTypeBaseIndex` //! or `kMemTypeStackIndex`. - ASMJIT_INLINE bool isBaseIndexType() const { + ASMJIT_INLINE bool isBaseIndexType() const noexcept { return _vmem.type <= kMemTypeStackIndex; } //! Get whether the memory operand has base register. - ASMJIT_INLINE bool hasBase() const { + ASMJIT_INLINE bool hasBase() const noexcept { return _vmem.base != kInvalidValue; } //! Get memory operand base id, or `kInvalidValue`. - ASMJIT_INLINE uint32_t getBase() const { + ASMJIT_INLINE uint32_t getBase() const noexcept { return _vmem.base; } //! Set memory operand size. 
- ASMJIT_INLINE BaseMem& setSize(uint32_t size) { + ASMJIT_INLINE BaseMem& setSize(uint32_t size) noexcept { _vmem.size = static_cast(size); return *this; } //! Get memory operand relative displacement. - ASMJIT_INLINE int32_t getDisplacement() const { + ASMJIT_INLINE int32_t getDisplacement() const noexcept { return _vmem.displacement; } //! Set memory operand relative displacement. - ASMJIT_INLINE BaseMem& setDisplacement(int32_t disp) { + ASMJIT_INLINE BaseMem& setDisplacement(int32_t disp) noexcept { _vmem.displacement = disp; return *this; } @@ -737,16 +752,16 @@ struct BaseMem : public Operand { // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE BaseMem& operator=(const BaseMem& other) { + ASMJIT_INLINE BaseMem& operator=(const BaseMem& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const BaseMem& other) const { + ASMJIT_INLINE bool operator==(const BaseMem& other) const noexcept { return (_packed[0] == other._packed[0]) & (_packed[1] == other._packed[1]); } - ASMJIT_INLINE bool operator!=(const BaseMem& other) const { + ASMJIT_INLINE bool operator!=(const BaseMem& other) const noexcept { return !(*this == other); } }; @@ -769,65 +784,65 @@ struct Imm : public Operand { // -------------------------------------------------------------------------- //! Create a new immediate value (initial value is 0). - Imm() : Operand(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeImm, 0, 0, 0, kInvalidValue); + Imm() noexcept : Operand(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeImm, 0, 0, 0, kInvalidValue); _imm.value._i64[0] = 0; } //! Create a new signed immediate value, assigning the value to `val`. - explicit Imm(int64_t val) : Operand(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeImm, 0, 0, 0, kInvalidValue); + explicit Imm(int64_t val) noexcept : Operand(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeImm, 0, 0, 0, kInvalidValue); _imm.value._i64[0] = val; } //! 
Create a new immediate value from `other`. - ASMJIT_INLINE Imm(const Imm& other) : Operand(other) {} + ASMJIT_INLINE Imm(const Imm& other) noexcept : Operand(other) {} - explicit ASMJIT_INLINE Imm(const _NoInit&) : Operand(NoInit) {} + explicit ASMJIT_INLINE Imm(const _NoInit&) noexcept : Operand(NoInit) {} // -------------------------------------------------------------------------- // [Immediate Specific] // -------------------------------------------------------------------------- //! Clone `Imm` operand. - ASMJIT_INLINE Imm clone() const { + ASMJIT_INLINE Imm clone() const noexcept { return Imm(*this); } //! Get whether the immediate can be casted to 8-bit signed integer. - ASMJIT_INLINE bool isInt8() const { return Utils::isInt8(_imm.value._i64[0]); } + ASMJIT_INLINE bool isInt8() const noexcept { return Utils::isInt8(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 8-bit unsigned integer. - ASMJIT_INLINE bool isUInt8() const { return Utils::isUInt8(_imm.value._i64[0]); } + ASMJIT_INLINE bool isUInt8() const noexcept { return Utils::isUInt8(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 16-bit signed integer. - ASMJIT_INLINE bool isInt16() const { return Utils::isInt16(_imm.value._i64[0]); } + ASMJIT_INLINE bool isInt16() const noexcept { return Utils::isInt16(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 16-bit unsigned integer. - ASMJIT_INLINE bool isUInt16() const { return Utils::isUInt16(_imm.value._i64[0]); } + ASMJIT_INLINE bool isUInt16() const noexcept { return Utils::isUInt16(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 32-bit signed integer. - ASMJIT_INLINE bool isInt32() const { return Utils::isInt32(_imm.value._i64[0]); } + ASMJIT_INLINE bool isInt32() const noexcept { return Utils::isInt32(_imm.value._i64[0]); } //! Get whether the immediate can be casted to 32-bit unsigned integer. 
- ASMJIT_INLINE bool isUInt32() const { return Utils::isUInt32(_imm.value._i64[0]); } + ASMJIT_INLINE bool isUInt32() const noexcept { return Utils::isUInt32(_imm.value._i64[0]); } //! Get immediate value as 8-bit signed integer. - ASMJIT_INLINE int8_t getInt8() const { return _imm.value._i8[_ASMJIT_ARCH_INDEX(8, 0)]; } + ASMJIT_INLINE int8_t getInt8() const noexcept { return _imm.value._i8[_ASMJIT_ARCH_INDEX(8, 0)]; } //! Get immediate value as 8-bit unsigned integer. - ASMJIT_INLINE uint8_t getUInt8() const { return _imm.value._u8[_ASMJIT_ARCH_INDEX(8, 0)]; } + ASMJIT_INLINE uint8_t getUInt8() const noexcept { return _imm.value._u8[_ASMJIT_ARCH_INDEX(8, 0)]; } //! Get immediate value as 16-bit signed integer. - ASMJIT_INLINE int16_t getInt16() const { return _imm.value._i16[_ASMJIT_ARCH_INDEX(4, 0)]; } + ASMJIT_INLINE int16_t getInt16() const noexcept { return _imm.value._i16[_ASMJIT_ARCH_INDEX(4, 0)]; } //! Get immediate value as 16-bit unsigned integer. - ASMJIT_INLINE uint16_t getUInt16() const { return _imm.value._u16[_ASMJIT_ARCH_INDEX(4, 0)]; } + ASMJIT_INLINE uint16_t getUInt16() const noexcept { return _imm.value._u16[_ASMJIT_ARCH_INDEX(4, 0)]; } //! Get immediate value as 32-bit signed integer. - ASMJIT_INLINE int32_t getInt32() const { return _imm.value._i32[_ASMJIT_ARCH_INDEX(2, 0)]; } + ASMJIT_INLINE int32_t getInt32() const noexcept { return _imm.value._i32[_ASMJIT_ARCH_INDEX(2, 0)]; } //! Get immediate value as 32-bit unsigned integer. - ASMJIT_INLINE uint32_t getUInt32() const { return _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 0)]; } + ASMJIT_INLINE uint32_t getUInt32() const noexcept { return _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 0)]; } //! Get immediate value as 64-bit signed integer. - ASMJIT_INLINE int64_t getInt64() const { return _imm.value._i64[0]; } + ASMJIT_INLINE int64_t getInt64() const noexcept { return _imm.value._i64[0]; } //! Get immediate value as 64-bit unsigned integer. 
- ASMJIT_INLINE uint64_t getUInt64() const { return _imm.value._u64[0]; } + ASMJIT_INLINE uint64_t getUInt64() const noexcept { return _imm.value._u64[0]; } //! Get immediate value as `intptr_t`. - ASMJIT_INLINE intptr_t getIntPtr() const { + ASMJIT_INLINE intptr_t getIntPtr() const noexcept { if (sizeof(intptr_t) == sizeof(int64_t)) return static_cast(getInt64()); else @@ -835,7 +850,7 @@ struct Imm : public Operand { } //! Get immediate value as `uintptr_t`. - ASMJIT_INLINE uintptr_t getUIntPtr() const { + ASMJIT_INLINE uintptr_t getUIntPtr() const noexcept { if (sizeof(uintptr_t) == sizeof(uint64_t)) return static_cast(getUInt64()); else @@ -843,17 +858,17 @@ struct Imm : public Operand { } //! Get low 32-bit signed integer. - ASMJIT_INLINE int32_t getInt32Lo() const { return _imm.value._i32[_ASMJIT_ARCH_INDEX(2, 0)]; } + ASMJIT_INLINE int32_t getInt32Lo() const noexcept { return _imm.value._i32[_ASMJIT_ARCH_INDEX(2, 0)]; } //! Get low 32-bit signed integer. - ASMJIT_INLINE uint32_t getUInt32Lo() const { return _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 0)]; } + ASMJIT_INLINE uint32_t getUInt32Lo() const noexcept { return _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 0)]; } //! Get high 32-bit signed integer. - ASMJIT_INLINE int32_t getInt32Hi() const { return _imm.value._i32[_ASMJIT_ARCH_INDEX(2, 1)]; } + ASMJIT_INLINE int32_t getInt32Hi() const noexcept { return _imm.value._i32[_ASMJIT_ARCH_INDEX(2, 1)]; } //! Get high 32-bit signed integer. - ASMJIT_INLINE uint32_t getUInt32Hi() const { return _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 1)]; } + ASMJIT_INLINE uint32_t getUInt32Hi() const noexcept { return _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 1)]; } //! Set immediate value to 8-bit signed integer `val`. - ASMJIT_INLINE Imm& setInt8(int8_t val) { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& setInt8(int8_t val) noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._i64[0] = static_cast(val); } else { @@ -865,8 +880,8 @@ struct Imm : public Operand { } //! 
Set immediate value to 8-bit unsigned integer `val`. - ASMJIT_INLINE Imm& setUInt8(uint8_t val) { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& setUInt8(uint8_t val) noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._u64[0] = static_cast(val); } else { @@ -877,8 +892,8 @@ struct Imm : public Operand { } //! Set immediate value to 16-bit signed integer `val`. - ASMJIT_INLINE Imm& setInt16(int16_t val) { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& setInt16(int16_t val) noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._i64[0] = static_cast(val); } else { @@ -890,8 +905,8 @@ struct Imm : public Operand { } //! Set immediate value to 16-bit unsigned integer `val`. - ASMJIT_INLINE Imm& setUInt16(uint16_t val) { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& setUInt16(uint16_t val) noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._u64[0] = static_cast(val); } else { @@ -902,8 +917,8 @@ struct Imm : public Operand { } //! Set immediate value to 32-bit signed integer `val`. - ASMJIT_INLINE Imm& setInt32(int32_t val) { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& setInt32(int32_t val) noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._i64[0] = static_cast(val); } else { @@ -914,8 +929,8 @@ struct Imm : public Operand { } //! Set immediate value to 32-bit unsigned integer `val`. - ASMJIT_INLINE Imm& setUInt32(uint32_t val) { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& setUInt32(uint32_t val) noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._u64[0] = static_cast(val); } else { @@ -926,43 +941,45 @@ struct Imm : public Operand { } //! Set immediate value to 64-bit signed integer `val`. - ASMJIT_INLINE Imm& setInt64(int64_t val) { + ASMJIT_INLINE Imm& setInt64(int64_t val) noexcept { _imm.value._i64[0] = val; return *this; } //! Set immediate value to 64-bit unsigned integer `val`. - ASMJIT_INLINE Imm& setUInt64(uint64_t val) { + ASMJIT_INLINE Imm& setUInt64(uint64_t val) noexcept { _imm.value._u64[0] = val; return *this; } //! Set immediate value to intptr_t `val`. 
- ASMJIT_INLINE Imm& setIntPtr(intptr_t val) { + ASMJIT_INLINE Imm& setIntPtr(intptr_t val) noexcept { _imm.value._i64[0] = static_cast(val); return *this; } //! Set immediate value to uintptr_t `val`. - ASMJIT_INLINE Imm& setUIntPtr(uintptr_t val) { + ASMJIT_INLINE Imm& setUIntPtr(uintptr_t val) noexcept { _imm.value._u64[0] = static_cast(val); return *this; } //! Set immediate value as unsigned type to `val`. - ASMJIT_INLINE Imm& setPtr(void* p) { return setIntPtr((intptr_t)p); } + ASMJIT_INLINE Imm& setPtr(void* p) noexcept { + return setIntPtr((intptr_t)p); + } // -------------------------------------------------------------------------- // [Float] // -------------------------------------------------------------------------- - ASMJIT_INLINE Imm& setFloat(float f) { + ASMJIT_INLINE Imm& setFloat(float f) noexcept { _imm.value._f32[_ASMJIT_ARCH_INDEX(2, 0)] = f; _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 1)] = 0; return *this; } - ASMJIT_INLINE Imm& setDouble(double d) { + ASMJIT_INLINE Imm& setDouble(double d) noexcept { _imm.value._f64[0] = d; return *this; } @@ -971,8 +988,8 @@ struct Imm : public Operand { // [Truncate] // -------------------------------------------------------------------------- - ASMJIT_INLINE Imm& truncateTo8Bits() { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& truncateTo8Bits() noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._u64[0] &= static_cast(0x000000FFU); } else { @@ -982,8 +999,8 @@ struct Imm : public Operand { return *this; } - ASMJIT_INLINE Imm& truncateTo16Bits() { - if (kArchHost64Bit) { + ASMJIT_INLINE Imm& truncateTo16Bits() noexcept { + if (ASMJIT_ARCH_64BIT) { _imm.value._u64[0] &= static_cast(0x0000FFFFU); } else { @@ -993,7 +1010,7 @@ struct Imm : public Operand { return *this; } - ASMJIT_INLINE Imm& truncateTo32Bits() { + ASMJIT_INLINE Imm& truncateTo32Bits() noexcept { _imm.value._u32[_ASMJIT_ARCH_INDEX(2, 1)] = 0; return *this; } @@ -1003,7 +1020,7 @@ struct Imm : public Operand { // 
-------------------------------------------------------------------------- //! Assign `other` to the immediate operand. - ASMJIT_INLINE Imm& operator=(const Imm& other) { + ASMJIT_INLINE Imm& operator=(const Imm& other) noexcept { _copy(other); return *this; } @@ -1015,29 +1032,29 @@ struct Imm : public Operand { //! Label (jump target or data location). //! -//! Label represents a location in code typically used as jump targets, but may -//! be also reference data or static variables. Label has to be explicitly -//! created by a code-generator by calling `CodeGen::newLabel()` where `CodeGen` -//! is your code generator, which derives from `Assembler` or `Compiler`. +//! Label represents a location in code typically used as a jump target, but +//! may be also a reference to some data or a static variable. Label has to be +//! explicitly created by the `Assembler` or any `ExternalTool` by using their +//! `newLabel()` function. //! //! Example of using labels: //! //! ~~~ //! // Create Assembler/Compiler. -//! host::Assembler a; +//! X86Assembler a; //! //! // Create Label instance. -//! Label L_1(a); +//! Label L1 = a.newLabel(); //! //! // ... your code ... //! //! // Using label. -//! a.jump(L_1); +//! a.jump(L1); //! //! // ... your code ... //! -//! // Bind label to the current position, see `CodeGen::bind()`. -//! a.bind(L_1); +//! // Bind label to the current position, see `Assembler::bind()`. +//! a.bind(L1); //! ~~~ struct Label : public Operand { // -------------------------------------------------------------------------- @@ -1045,31 +1062,26 @@ struct Label : public Operand { // -------------------------------------------------------------------------- //! Create new, unassociated label. 
- ASMJIT_INLINE Label() : Operand(NoInit) { + ASMJIT_INLINE Label() noexcept : Operand(NoInit) { reset(); } - explicit ASMJIT_INLINE Label(uint32_t id) : Operand(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeLabel, 0, 0, 0, id); + explicit ASMJIT_INLINE Label(uint32_t id) noexcept : Operand(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeLabel, 0, 0, 0, id); _init_packed_d2_d3(0, 0); } - //! Create new initialized label. - explicit ASMJIT_INLINE Label(Assembler& a); - //! Create new initialized label. - explicit ASMJIT_INLINE Label(Compiler& c); - //! Create reference to another label. - ASMJIT_INLINE Label(const Label& other) : Operand(other) {} + ASMJIT_INLINE Label(const Label& other) noexcept : Operand(other) {} - explicit ASMJIT_INLINE Label(const _NoInit&) : Operand(NoInit) {} + explicit ASMJIT_INLINE Label(const _NoInit&) noexcept : Operand(NoInit) {} // -------------------------------------------------------------------------- // [Reset] // -------------------------------------------------------------------------- - ASMJIT_INLINE void reset() { - _init_packed_op_sz_b0_b1_id(kOperandTypeLabel, 0, 0, 0, kInvalidValue); + ASMJIT_INLINE void reset() noexcept { + _init_packed_op_sz_b0_b1_id(kTypeLabel, 0, 0, 0, kInvalidValue); _init_packed_d2_d3(0, 0); } @@ -1078,16 +1090,16 @@ struct Label : public Operand { // -------------------------------------------------------------------------- //! Get whether the label has been initialized by `Assembler` or `Compiler`. 
- ASMJIT_INLINE bool isInitialized() const { return _label.id != kInvalidValue; } + ASMJIT_INLINE bool isInitialized() const noexcept { return _label.id != kInvalidValue; } // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE Label& operator=(const Label& other) { _copy(other); return *this; } + ASMJIT_INLINE Label& operator=(const Label& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const Label& other) const { return _base.id == other._base.id; } - ASMJIT_INLINE bool operator!=(const Label& other) const { return _base.id != other._base.id; } + ASMJIT_INLINE bool operator==(const Label& other) const noexcept { return _base.id == other._base.id; } + ASMJIT_INLINE bool operator!=(const Label& other) const noexcept { return _base.id != other._base.id; } }; // ============================================================================ @@ -1101,41 +1113,41 @@ struct Var : public Operand { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE Var() : Operand(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeVar, 0, 0, 0, kInvalidValue); + ASMJIT_INLINE Var() noexcept : Operand(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeVar, 0, 0, 0, kInvalidValue); _init_packed_d2_d3(kInvalidValue, kInvalidValue); } - ASMJIT_INLINE Var(const Var& other) : Operand(other) {} + ASMJIT_INLINE Var(const Var& other) noexcept : Operand(other) {} - explicit ASMJIT_INLINE Var(const _NoInit&) : Operand(NoInit) {} + explicit ASMJIT_INLINE Var(const _NoInit&) noexcept : Operand(NoInit) {} // -------------------------------------------------------------------------- // [Var Specific] // -------------------------------------------------------------------------- //! Clone `Var` operand. 
- ASMJIT_INLINE Var clone() const { return Var(*this); } + ASMJIT_INLINE Var clone() const noexcept { return Var(*this); } //! Reset Var operand. - ASMJIT_INLINE void reset() { - _init_packed_op_sz_b0_b1_id(kOperandTypeVar, 0, kInvalidReg, kInvalidReg, kInvalidValue); + ASMJIT_INLINE void reset() noexcept { + _init_packed_op_sz_b0_b1_id(kTypeVar, 0, kInvalidReg, kInvalidReg, kInvalidValue); _init_packed_d2_d3(kInvalidValue, kInvalidValue); } //! Get whether the variable has been initialized by `Compiler`. - ASMJIT_INLINE bool isInitialized() const { return _vreg.id != kInvalidValue; } + ASMJIT_INLINE bool isInitialized() const noexcept { return _vreg.id != kInvalidValue; } //! Get variable type. - ASMJIT_INLINE uint32_t getVarType() const { return _vreg.vType; } + ASMJIT_INLINE uint32_t getVarType() const noexcept { return _vreg.vType; } // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE Var& operator=(const Var& other) { _copy(other); return *this; } + ASMJIT_INLINE Var& operator=(const Var& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const Var& other) const { return _packed[0] == other._packed[0]; } - ASMJIT_INLINE bool operator!=(const Var& other) const { return !operator==(other); } + ASMJIT_INLINE bool operator==(const Var& other) const noexcept { return _packed[0] == other._packed[0]; } + ASMJIT_INLINE bool operator!=(const Var& other) const noexcept { return !operator==(other); } }; #endif // !ASMJIT_DISABLE_COMPILER @@ -1148,12 +1160,20 @@ struct Var : public Operand { ASMJIT_VARAPI const Operand noOperand; //! Create a signed immediate operand. -static ASMJIT_INLINE Imm imm(int64_t val) { return Imm(val); } +static ASMJIT_INLINE Imm imm(int64_t val) noexcept { + return Imm(val); +} + //! Create an unsigned immediate operand. 
-static ASMJIT_INLINE Imm imm_u(uint64_t val) { return Imm(static_cast(val)); } +static ASMJIT_INLINE Imm imm_u(uint64_t val) noexcept { + return Imm(static_cast(val)); +} + //! Create a `void*` immediate operand. template -static ASMJIT_INLINE Imm imm_ptr(T p) { return Imm(static_cast((intptr_t)p)); } +static ASMJIT_INLINE Imm imm_ptr(T p) noexcept { + return Imm(static_cast((intptr_t)p)); +} //! \} diff --git a/src/asmjit/base/podvector.cpp b/src/asmjit/base/podvector.cpp new file mode 100644 index 0000000..0cee16a --- /dev/null +++ b/src/asmjit/base/podvector.cpp @@ -0,0 +1,132 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Export] +#define ASMJIT_EXPORTS + +// [Dependencies - AsmJit] +#include "../base/podvector.h" +#include "../base/utils.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +// ============================================================================ +// [asmjit::PodVectorBase - NullData] +// ============================================================================ + +const PodVectorBase::Data PodVectorBase::_nullData = { 0, 0 }; + +static ASMJIT_INLINE bool isDataStatic(PodVectorBase* self, PodVectorBase::Data* d) noexcept { + return (void*)(self + 1) == (void*)d; +} + +// ============================================================================ +// [asmjit::PodVectorBase - Reset] +// ============================================================================ + +//! Clear vector data and free internal buffer. 
+void PodVectorBase::reset(bool releaseMemory) noexcept { + Data* d = _d; + if (d == &_nullData) + return; + + if (releaseMemory && !isDataStatic(this, d)) { + ASMJIT_FREE(d); + _d = const_cast(&_nullData); + return; + } + + d->length = 0; +} + +// ============================================================================ +// [asmjit::PodVectorBase - Helpers] +// ============================================================================ + +Error PodVectorBase::_grow(size_t n, size_t sizeOfT) noexcept { + Data* d = _d; + + size_t threshold = kMemAllocGrowMax / sizeOfT; + size_t capacity = d->capacity; + size_t after = d->length; + + if (IntTraits::maxValue() - n < after) + return kErrorNoHeapMemory; + + after += n; + + if (capacity >= after) + return kErrorOk; + + // PodVector is used as a linear array for some data structures used by + // AsmJit code generation. The purpose of this agressive growing schema + // is to minimize memory reallocations, because AsmJit code generation + // classes live short life and will be freed or reused soon. 
+ if (capacity < 32) + capacity = 32; + else if (capacity < 128) + capacity = 128; + else if (capacity < 512) + capacity = 512; + + while (capacity < after) { + if (capacity < threshold) + capacity *= 2; + else + capacity += threshold; + } + + return _reserve(capacity, sizeOfT); +} + +Error PodVectorBase::_reserve(size_t n, size_t sizeOfT) noexcept { + Data* d = _d; + + if (d->capacity >= n) + return kErrorOk; + + size_t nBytes = sizeof(Data) + n * sizeOfT; + if (ASMJIT_UNLIKELY(nBytes < n)) + return kErrorNoHeapMemory; + + if (d == &_nullData) { + d = static_cast(ASMJIT_ALLOC(nBytes)); + if (ASMJIT_UNLIKELY(d == nullptr)) + return kErrorNoHeapMemory; + d->length = 0; + } + else { + if (isDataStatic(this, d)) { + Data* oldD = d; + + d = static_cast(ASMJIT_ALLOC(nBytes)); + if (ASMJIT_UNLIKELY(d == nullptr)) + return kErrorNoHeapMemory; + + size_t len = d->length; + d->length = len; + ::memcpy(d, oldD->getData(), len * sizeOfT); + } + else { + d = static_cast(ASMJIT_REALLOC(d, nBytes)); + if (ASMJIT_UNLIKELY(d == nullptr)) + return kErrorNoHeapMemory; + } + } + + d->capacity = n; + _d = d; + + return kErrorOk; +} + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" diff --git a/src/asmjit/base/podvector.h b/src/asmjit/base/podvector.h new file mode 100644 index 0000000..51aa8c8 --- /dev/null +++ b/src/asmjit/base/podvector.h @@ -0,0 +1,278 @@ +// [AsmJit] +// Complete x86/x64 JIT and Remote Assembler for C++. +// +// [License] +// Zlib - See LICENSE.md file in the package. + +// [Guard] +#ifndef _ASMJIT_BASE_PODVECTOR_H +#define _ASMJIT_BASE_PODVECTOR_H + +// [Dependencies - AsmJit] +#include "../base/globals.h" + +// [Api-Begin] +#include "../apibegin.h" + +namespace asmjit { + +//! \addtogroup asmjit_base +//! \{ + +// ============================================================================ +// [asmjit::PodVectorBase] +// ============================================================================ + +//! 
\internal +struct PodVectorBase { + // -------------------------------------------------------------------------- + // [Data] + // -------------------------------------------------------------------------- + + //! \internal + struct Data { + //! Get data. + ASMJIT_INLINE void* getData() const noexcept { + return static_cast(const_cast(this + 1)); + } + + //! Capacity of the vector. + size_t capacity; + //! Length of the vector. + size_t length; + }; + + static ASMJIT_API const Data _nullData; + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new instance of `PodVectorBase`. + ASMJIT_INLINE PodVectorBase() noexcept : _d(const_cast(&_nullData)) {} + //! Destroy the `PodVectorBase` and its data. + ASMJIT_INLINE ~PodVectorBase() noexcept { reset(true); } + +protected: + explicit ASMJIT_INLINE PodVectorBase(Data* d) noexcept : _d(d) {} + + // -------------------------------------------------------------------------- + // [Reset] + // -------------------------------------------------------------------------- + +public: + //! Reset the vector data and set its `length` to zero. + //! + //! If `releaseMemory` is true the vector buffer will be released to the + //! system. 
+ ASMJIT_API void reset(bool releaseMemory = false) noexcept; + + // -------------------------------------------------------------------------- + // [Grow / Reserve] + // -------------------------------------------------------------------------- + +protected: + ASMJIT_API Error _grow(size_t n, size_t sizeOfT) noexcept; + ASMJIT_API Error _reserve(size_t n, size_t sizeOfT) noexcept; + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + +public: + Data* _d; +}; + +// ============================================================================ +// [asmjit::PodVector] +// ============================================================================ + +//! Template used to store and manage array of POD data. +//! +//! This template has these adventages over other vector<> templates: +//! - Non-copyable (designed to be non-copyable, we want it) +//! - No copy-on-write (some implementations of stl can use it) +//! - Optimized for working only with POD types +//! - Uses ASMJIT_... memory management macros +template +struct PodVector : PodVectorBase { + ASMJIT_NO_COPY(PodVector) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + //! Create a new instance of `PodVector`. + ASMJIT_INLINE PodVector() noexcept {} + //! Destroy the `PodVector` and its data. + ASMJIT_INLINE ~PodVector() noexcept {} + +protected: + explicit ASMJIT_INLINE PodVector(Data* d) noexcept : PodVectorBase(d) {} + + // -------------------------------------------------------------------------- + // [Data] + // -------------------------------------------------------------------------- + +public: + //! Get whether the vector is empty. + ASMJIT_INLINE bool isEmpty() const noexcept { return _d->length == 0; } + //! Get length. 
+ ASMJIT_INLINE size_t getLength() const noexcept { return _d->length; } + //! Get capacity. + ASMJIT_INLINE size_t getCapacity() const noexcept { return _d->capacity; } + //! Get data. + ASMJIT_INLINE T* getData() noexcept { return static_cast(_d->getData()); } + //! \overload + ASMJIT_INLINE const T* getData() const noexcept { return static_cast(_d->getData()); } + + // -------------------------------------------------------------------------- + // [Grow / Reserve] + // -------------------------------------------------------------------------- + + //! Called to grow the buffer to fit at least `n` elements more. + ASMJIT_INLINE Error _grow(size_t n) noexcept { return PodVectorBase::_grow(n, sizeof(T)); } + //! Realloc internal array to fit at least `n` items. + ASMJIT_INLINE Error _reserve(size_t n) noexcept { return PodVectorBase::_reserve(n, sizeof(T)); } + + // -------------------------------------------------------------------------- + // [Ops] + // -------------------------------------------------------------------------- + + //! Prepend `item` to vector. + Error prepend(const T& item) noexcept { + Data* d = _d; + + if (d->length == d->capacity) { + ASMJIT_PROPAGATE_ERROR(_grow(1)); + _d = d; + } + + ::memmove(static_cast(d->getData()) + 1, d->getData(), d->length * sizeof(T)); + ::memcpy(d->getData(), &item, sizeof(T)); + + d->length++; + return kErrorOk; + } + + //! Insert an `item` at the `index`. + Error insert(size_t index, const T& item) noexcept { + Data* d = _d; + ASMJIT_ASSERT(index <= d->length); + + if (d->length == d->capacity) { + ASMJIT_PROPAGATE_ERROR(_grow(1)); + d = _d; + } + + T* dst = static_cast(d->getData()) + index; + ::memmove(dst + 1, dst, d->length - index); + ::memcpy(dst, &item, sizeof(T)); + + d->length++; + return kErrorOk; + } + + //! Append `item` to vector. 
+ Error append(const T& item) noexcept { + Data* d = _d; + + if (d->length == d->capacity) { + ASMJIT_PROPAGATE_ERROR(_grow(1)); + d = _d; + } + + ::memcpy(static_cast(d->getData()) + d->length, &item, sizeof(T)); + + d->length++; + return kErrorOk; + } + + //! Get index of `val` or `kInvalidIndex` if not found. + size_t indexOf(const T& val) const noexcept { + Data* d = _d; + + const T* data = static_cast(d->getData()); + size_t len = d->length; + + for (size_t i = 0; i < len; i++) + if (data[i] == val) + return i; + + return kInvalidIndex; + } + + //! Remove item at index `i`. + void removeAt(size_t i) noexcept { + Data* d = _d; + ASMJIT_ASSERT(i < d->length); + + T* data = static_cast(d->getData()) + i; + d->length--; + ::memmove(data, data + 1, d->length - i); + } + + //! Swap this pod-vector with `other`. + void swap(PodVector& other) noexcept { + T* otherData = other._d; + other._d = _d; + _d = otherData; + } + + //! Get item at index `i`. + ASMJIT_INLINE T& operator[](size_t i) noexcept { + ASMJIT_ASSERT(i < getLength()); + return getData()[i]; + } + + //! Get item at index `i`. + ASMJIT_INLINE const T& operator[](size_t i) const noexcept { + ASMJIT_ASSERT(i < getLength()); + return getData()[i]; + } +}; + +// ============================================================================ +// [asmjit::PodVectorTmp] +// ============================================================================ + +template +struct PodVectorTmp : public PodVector { + ASMJIT_NO_COPY(PodVectorTmp) + + // -------------------------------------------------------------------------- + // [StaticData] + // -------------------------------------------------------------------------- + + struct StaticData : public PodVectorBase::Data { + char data[sizeof(T) * N]; + }; + + // -------------------------------------------------------------------------- + // [Data] + // -------------------------------------------------------------------------- + + //! Create a new instance of `PodVectorTmp`. 
+ ASMJIT_INLINE PodVectorTmp() noexcept : PodVector(&_staticData) { + _staticData.capacity = N; + _staticData.length = 0; + } + //! Destroy the `PodVectorTmp` and its data. + ASMJIT_INLINE ~PodVectorTmp() noexcept {} + + // -------------------------------------------------------------------------- + // [Members] + // -------------------------------------------------------------------------- + + StaticData _staticData; +}; + +//! \} + +} // asmjit namespace + +// [Api-End] +#include "../apiend.h" + +// [Guard] +#endif // _ASMJIT_BASE_PODVECTOR_H diff --git a/src/asmjit/base/runtime.cpp b/src/asmjit/base/runtime.cpp index 88e39dd..4cdae44 100644 --- a/src/asmjit/base/runtime.cpp +++ b/src/asmjit/base/runtime.cpp @@ -9,62 +9,56 @@ // [Dependencies - AsmJit] #include "../base/assembler.h" -#include "../base/cpuinfo.h" #include "../base/runtime.h" +// TODO: Rename this, or make call conv independent of CompilerFunc. +#include "../base/compilerfunc.h" + // [Api-Begin] #include "../apibegin.h" namespace asmjit { // ============================================================================ -// [asmjit::Runtime - Construction / Destruction] +// [asmjit::Runtime - Utilities] // ============================================================================ -Runtime::Runtime() { - _sizeLimit = 0; - _baseAddress = kNoBaseAddress; - - _runtimeType = kRuntimeTypeNone; - _allocType = kVMemAllocFreeable; - ::memset(_reserved, 0, sizeof(_reserved)); -} -Runtime::~Runtime() {} - -// ============================================================================ -// [asmjit::HostRuntime - Construction / Destruction] -// ============================================================================ - -HostRuntime::HostRuntime() { _runtimeType = kRuntimeTypeJit; } -HostRuntime::~HostRuntime() {} - -// ============================================================================ -// [asmjit::HostRuntime - Interface] -// 
============================================================================ - -const CpuInfo* HostRuntime::getCpuInfo() { - return CpuInfo::getHost(); -} - -uint32_t HostRuntime::getStackAlignment() { +static ASMJIT_INLINE uint32_t hostStackAlignment() noexcept { + // By default a pointer-size stack alignment is assumed. uint32_t alignment = sizeof(intptr_t); - // Modern Linux, APPLE and UNIX guarantees 16-byte stack alignment, but I'm - // not sure about all other UNIX operating systems, because 16-byte alignment - // is addition to an older specification. -#if (ASMJIT_ARCH_X64) || \ - (ASMJIT_ARCH_X86 && (ASMJIT_OS_LINUX || ASMJIT_OS_BSD || ASMJIT_OS_MAC || ASMJIT_OS_ANDROID)) - alignment = 16; + // ARM & ARM64 + // ----------- + // + // - 32-bit ARM requires stack to be aligned to 8 bytes. + // - 64-bit ARM requires stack to be aligned to 16 bytes. +#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 + alignment = ASMJIT_ARCH_ARM32 ? 8 : 16; +#endif + + // X86 & X64 + // --------- + // + // - 32-bit X86 requires stack to be aligned to 4 bytes. Modern Linux, APPLE + // and UNIX guarantees 16-byte stack alignment even in 32-bit, but I'm + // not sure about all other UNIX operating systems, because 16-byte alignment + // is addition to an older specification. + // - 64-bit X86 requires stack to be aligned to 16 bytes. +#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 + int modernOS = ASMJIT_OS_LINUX || // Linux & ANDROID. + ASMJIT_OS_MAC || // OSX and iOS. + ASMJIT_OS_BSD; // BSD variants. + alignment = ASMJIT_ARCH_X64 || modernOS ? 16 : 4; #endif return alignment; } -void HostRuntime::flush(void* p, size_t size) { +static ASMJIT_INLINE void hostFlushInstructionCache(void* p, size_t size) noexcept { // Only useful on non-x86 architectures. #if !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64 # if ASMJIT_OS_WINDOWS - // Windows has built-in support in kernel32.dll. + // Windows has a built-in support in kernel32.dll. 
::FlushInstructionCache(_memMgr.getProcessHandle(), p, size); # endif // ASMJIT_OS_WINDOWS #else @@ -73,11 +67,51 @@ void HostRuntime::flush(void* p, size_t size) { #endif // !ASMJIT_ARCH_X86 && !ASMJIT_ARCH_X64 } +// ============================================================================ +// [asmjit::Runtime - Construction / Destruction] +// ============================================================================ + +Runtime::Runtime() noexcept + : _runtimeType(kTypeNone), + _allocType(kVMemAllocFreeable), + _cpuInfo(), + _stackAlignment(0), + _cdeclConv(kCallConvNone), + _stdCallConv(kCallConvNone), + _baseAddress(kNoBaseAddress), + _sizeLimit(0) { + + ::memset(_reserved, 0, sizeof(_reserved)); +} +Runtime::~Runtime() noexcept {} + +// ============================================================================ +// [asmjit::HostRuntime - Construction / Destruction] +// ============================================================================ + +HostRuntime::HostRuntime() noexcept { + _runtimeType = kTypeJit; + _cpuInfo = CpuInfo::getHost(); + + _stackAlignment = hostStackAlignment(); + _cdeclConv = kCallConvHostCDecl; + _stdCallConv = kCallConvHostStdCall; +} +HostRuntime::~HostRuntime() noexcept {} + +// ============================================================================ +// [asmjit::HostRuntime - Interface] +// ============================================================================ + +void HostRuntime::flush(void* p, size_t size) noexcept { + hostFlushInstructionCache(p, size); +} + // ============================================================================ // [asmjit::StaticRuntime - Construction / Destruction] // ============================================================================ -StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) { +StaticRuntime::StaticRuntime(void* baseAddress, size_t sizeLimit) noexcept { _sizeLimit = sizeLimit; _baseAddress = static_cast((uintptr_t)baseAddress); } @@ -87,7 +121,7 @@ 
StaticRuntime::~StaticRuntime() {} // [asmjit::StaticRuntime - Interface] // ============================================================================ -Error StaticRuntime::add(void** dst, Assembler* assembler) { +Error StaticRuntime::add(void** dst, Assembler* assembler) noexcept { size_t codeSize = assembler->getCodeSize(); size_t sizeLimit = _sizeLimit; @@ -123,7 +157,7 @@ Error StaticRuntime::add(void** dst, Assembler* assembler) { return kErrorOk; } -Error StaticRuntime::release(void* p) { +Error StaticRuntime::release(void* p) noexcept { // There is nothing to release as `StaticRuntime` doesn't manage any memory. ASMJIT_UNUSED(p); return kErrorOk; @@ -133,14 +167,14 @@ Error StaticRuntime::release(void* p) { // [asmjit::JitRuntime - Construction / Destruction] // ============================================================================ -JitRuntime::JitRuntime() {} -JitRuntime::~JitRuntime() {} +JitRuntime::JitRuntime() noexcept {} +JitRuntime::~JitRuntime() noexcept {} // ============================================================================ // [asmjit::JitRuntime - Interface] // ============================================================================ -Error JitRuntime::add(void** dst, Assembler* assembler) { +Error JitRuntime::add(void** dst, Assembler* assembler) noexcept { size_t codeSize = assembler->getCodeSize(); if (codeSize == 0) { *dst = nullptr; @@ -155,17 +189,22 @@ Error JitRuntime::add(void** dst, Assembler* assembler) { // Relocate the code and release the unused memory back to `VMemMgr`. 
size_t relocSize = assembler->relocCode(p); - if (relocSize < codeSize) { - _memMgr.shrink(p, relocSize); + if (relocSize == 0) { + *dst = nullptr; + _memMgr.release(p); + return kErrorInvalidState; } + if (relocSize < codeSize) + _memMgr.shrink(p, relocSize); + flush(p, relocSize); *dst = p; return kErrorOk; } -Error JitRuntime::release(void* p) { +Error JitRuntime::release(void* p) noexcept { return _memMgr.release(p); } diff --git a/src/asmjit/base/runtime.h b/src/asmjit/base/runtime.h index 095aedd..ff68732 100644 --- a/src/asmjit/base/runtime.h +++ b/src/asmjit/base/runtime.h @@ -9,6 +9,7 @@ #define _ASMJIT_BASE_RUNTIME_H // [Dependencies - AsmJit] +#include "../base/cpuinfo.h" #include "../base/vmem.h" // [Api-Begin] @@ -26,16 +27,6 @@ struct CpuInfo; //! \addtogroup asmjit_base //! \{ -// ============================================================================ -// [asmjit::RuntimeType] -// ============================================================================ - -ASMJIT_ENUM(RuntimeType) { - kRuntimeTypeNone = 0, - kRuntimeTypeJit = 1, - kRuntimeTypeRemote = 2 -}; - // ============================================================================ // [asmjit::Runtime] // ============================================================================ @@ -44,63 +35,96 @@ ASMJIT_ENUM(RuntimeType) { struct ASMJIT_VIRTAPI Runtime { ASMJIT_NO_COPY(Runtime) + // -------------------------------------------------------------------------- + // [asmjit::RuntimeType] + // -------------------------------------------------------------------------- + + ASMJIT_ENUM(Type) { + kTypeNone = 0, + kTypeJit = 1, + kTypeRemote = 2 + }; + // -------------------------------------------------------------------------- // [Construction / Destruction] // -------------------------------------------------------------------------- //! Create a `Runtime` instance. - ASMJIT_API Runtime(); + ASMJIT_API Runtime() noexcept; //! Destroy the `Runtime` instance. 
- ASMJIT_API virtual ~Runtime(); + ASMJIT_API virtual ~Runtime() noexcept; // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- - //! Get runtime type. - ASMJIT_INLINE uint32_t getRuntimeType() const { return _runtimeType; } + //! Get the runtime type, see \ref Type. + ASMJIT_INLINE uint32_t getRuntimeType() const noexcept { return _runtimeType; } + + //! Get stack alignment of the target. + ASMJIT_INLINE uint32_t getStackAlignment() const noexcept { return _stackAlignment; } + + //! Get the CDECL calling convention conforming to the runtime's ABI. + //! + //! NOTE: This is a default calling convention used by the runtime's target. + ASMJIT_INLINE uint32_t getCdeclConv() const noexcept { return _cdeclConv; } + //! Get the STDCALL calling convention conforming to the runtime's ABI. + //! + //! NOTE: STDCALL calling convention is only used by 32-bit x86 target. On + //! all other targets it's mapped to CDECL and calling `getStdcallConv()` will + //! return the same as `getCdeclConv()`. + ASMJIT_INLINE uint32_t getStdCallConv() const noexcept { return _stdCallConv; } + + //! Get CPU information. + ASMJIT_INLINE const CpuInfo& getCpuInfo() const noexcept { return _cpuInfo; } + //! Set CPU information. + ASMJIT_INLINE void setCpuInfo(const CpuInfo& ci) noexcept { _cpuInfo = ci; } //! Get whether the runtime has a base address. - ASMJIT_INLINE bool hasBaseAddress() const { return _baseAddress != kNoBaseAddress; } + ASMJIT_INLINE bool hasBaseAddress() const noexcept { return _baseAddress != kNoBaseAddress; } //! Get the base address. - ASMJIT_INLINE Ptr getBaseAddress() const { return _baseAddress; } + ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; } // -------------------------------------------------------------------------- // [Interface] // -------------------------------------------------------------------------- - //! 
Get CPU information. - virtual const CpuInfo* getCpuInfo() = 0; - - //! Get stack alignment of target runtime. - virtual uint32_t getStackAlignment() = 0; - //! Allocate a memory needed for a code generated by `assembler` and //! relocate it to the target location. //! //! The beginning of the memory allocated for the function is returned in //! `dst`. Returns Status code as \ref ErrorCode, on failure `dst` is set to //! `nullptr`. - virtual Error add(void** dst, Assembler* assembler) = 0; + virtual Error add(void** dst, Assembler* assembler) noexcept = 0; //! Release memory allocated by `add`. - virtual Error release(void* p) = 0; + virtual Error release(void* p) noexcept = 0; // -------------------------------------------------------------------------- // [Members] // -------------------------------------------------------------------------- - //! Maximum size of the code that can be added to the runtime (0=unlimited). - size_t _sizeLimit; - //! Base address (-1 means no base address). - Ptr _baseAddress; - //! Type of the runtime. uint8_t _runtimeType; //! Type of the allocation. uint8_t _allocType; + + //! Runtime's stack alignment. + uint8_t _stackAlignment; + //! CDECL calling convention conforming to runtime ABI. + uint8_t _cdeclConv; + //! STDCALL calling convention conforming to runtime ABI. + uint8_t _stdCallConv; //! \internal - uint8_t _reserved[sizeof(intptr_t) - 2]; + uint8_t _reserved[3]; + + //! Runtime CPU information. + CpuInfo _cpuInfo; + + //! Base address (-1 means no base address). + Ptr _baseAddress; + //! Maximum size of the code that can be added to the runtime (0=unlimited). + size_t _sizeLimit; }; // ============================================================================ @@ -116,17 +140,14 @@ struct ASMJIT_VIRTAPI HostRuntime : public Runtime { // -------------------------------------------------------------------------- //! Create a `HostRuntime` instance. - ASMJIT_API HostRuntime(); + ASMJIT_API HostRuntime() noexcept; //! 
Destroy the `HostRuntime` instance. - ASMJIT_API virtual ~HostRuntime(); + ASMJIT_API virtual ~HostRuntime() noexcept; // -------------------------------------------------------------------------- // [Interface] // -------------------------------------------------------------------------- - ASMJIT_API virtual const CpuInfo* getCpuInfo(); - ASMJIT_API virtual uint32_t getStackAlignment(); - //! Flush an instruction cache. //! //! This member function is called after the code has been copied to the @@ -138,7 +159,7 @@ struct ASMJIT_VIRTAPI HostRuntime : public Runtime { //! //! This function can also be overridden to improve compatibility with tools //! such as Valgrind, however, it's not an official part of AsmJit. - ASMJIT_API virtual void flush(void* p, size_t size); + ASMJIT_API virtual void flush(void* p, size_t size) noexcept; }; // ============================================================================ @@ -159,31 +180,31 @@ struct ASMJIT_VIRTAPI StaticRuntime : public HostRuntime { //! Create a `StaticRuntime` instance. //! //! The `address` specifies a fixed target address, which will be used as a - //! base address for relocation, and `sizeLimit` specified the maximum size + //! base address for relocation, and `sizeLimit` specifies the maximum size //! of a code that can be copied to it. If there is no limit `sizeLimit` //! should be zero. - ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0); + ASMJIT_API StaticRuntime(void* baseAddress, size_t sizeLimit = 0) noexcept; //! Destroy the `StaticRuntime` instance. - ASMJIT_API virtual ~StaticRuntime(); + ASMJIT_API virtual ~StaticRuntime() noexcept; // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get the base address. - ASMJIT_INLINE Ptr getBaseAddress() const { return _baseAddress; } + ASMJIT_INLINE Ptr getBaseAddress() const noexcept { return _baseAddress; } //! 
Get the maximum size of the code that can be relocated/stored in the target. //! //! Returns zero if unlimited. - ASMJIT_INLINE size_t getSizeLimit() const { return _sizeLimit; } + ASMJIT_INLINE size_t getSizeLimit() const noexcept { return _sizeLimit; } // -------------------------------------------------------------------------- // [Interface] // -------------------------------------------------------------------------- - ASMJIT_API virtual Error add(void** dst, Assembler* assembler); - ASMJIT_API virtual Error release(void* p); + ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept; + ASMJIT_API virtual Error release(void* p) noexcept; }; // ============================================================================ @@ -199,28 +220,28 @@ struct ASMJIT_VIRTAPI JitRuntime : public HostRuntime { // -------------------------------------------------------------------------- //! Create a `JitRuntime` instance. - ASMJIT_API JitRuntime(); + ASMJIT_API JitRuntime() noexcept; //! Destroy the `JitRuntime` instance. - ASMJIT_API virtual ~JitRuntime(); + ASMJIT_API virtual ~JitRuntime() noexcept; // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get the type of allocation. - ASMJIT_INLINE uint32_t getAllocType() const { return _allocType; } + ASMJIT_INLINE uint32_t getAllocType() const noexcept { return _allocType; } //! Set the type of allocation. - ASMJIT_INLINE void setAllocType(uint32_t allocType) { _allocType = allocType; } + ASMJIT_INLINE void setAllocType(uint32_t allocType) noexcept { _allocType = allocType; } //! Get the virtual memory manager. 
- ASMJIT_INLINE VMemMgr* getMemMgr() const { return const_cast(&_memMgr); } + ASMJIT_INLINE VMemMgr* getMemMgr() const noexcept { return const_cast(&_memMgr); } // -------------------------------------------------------------------------- // [Interface] // -------------------------------------------------------------------------- - ASMJIT_API virtual Error add(void** dst, Assembler* assembler); - ASMJIT_API virtual Error release(void* p); + ASMJIT_API virtual Error add(void** dst, Assembler* assembler) noexcept; + ASMJIT_API virtual Error release(void* p) noexcept; // -------------------------------------------------------------------------- // [Members] diff --git a/src/asmjit/base/utils.cpp b/src/asmjit/base/utils.cpp index ea5c653..146109b 100644 --- a/src/asmjit/base/utils.cpp +++ b/src/asmjit/base/utils.cpp @@ -43,7 +43,7 @@ namespace asmjit { static volatile uint32_t Utils_hiResTicks; static volatile double Utils_hiResFreq; -uint32_t Utils::getTickCount() { +uint32_t Utils::getTickCount() noexcept { do { uint32_t hiResOk = Utils_hiResTicks; @@ -87,7 +87,7 @@ uint32_t Utils::getTickCount() { #elif ASMJIT_OS_MAC static mach_timebase_info_data_t CpuTicks_machTime; -uint32_t Utils::getTickCount() { +uint32_t Utils::getTickCount() noexcept { // Initialize the first time CpuTicks::now() is called (See Apple's QA1398). 
if (CpuTicks_machTime.denom == 0) { if (mach_timebase_info(&CpuTicks_machTime) != KERN_SUCCESS) @@ -106,7 +106,7 @@ uint32_t Utils::getTickCount() { // ============================================================================ #else -uint32_t Utils::getTickCount() { +uint32_t Utils::getTickCount() noexcept { #if defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0 struct timespec ts; @@ -122,6 +122,10 @@ uint32_t Utils::getTickCount() { } #endif // ASMJIT_OS +// ============================================================================ +// [asmjit::Utils - Unit] +// ============================================================================ + #if defined(ASMJIT_TEST) UNIT(base_utils) { uint32_t i; diff --git a/src/asmjit/base/utils.h b/src/asmjit/base/utils.h index 096cf79..817b52c 100644 --- a/src/asmjit/base/utils.h +++ b/src/asmjit/base/utils.h @@ -11,7 +11,7 @@ // [Dependencies - AsmJit] #include "../base/globals.h" -#if defined(_MSC_VER) && _MSC_VER >= 1400 +#if ASMJIT_CC_MSC_GE(14, 0, 0) # include #endif // ASMJIT_OS_WINDOWS @@ -66,7 +66,7 @@ struct IntTraits { typedef typename IntTraitsPrivate::UnsignedType UnsignedType; //! Get a minimum value of `T`. - static ASMJIT_INLINE T minValue() { + static ASMJIT_INLINE T minValue() noexcept { if (kIsSigned) return static_cast((~static_cast(0) >> 1) + static_cast(1)); else @@ -74,7 +74,7 @@ struct IntTraits { } //! Get a maximum value of `T`. - static ASMJIT_INLINE T maxValue() { + static ASMJIT_INLINE T maxValue() noexcept { if (kIsSigned) return static_cast(~static_cast(0) >> 1); else @@ -106,53 +106,64 @@ struct Utils { double d; }; - //! Bit-cast `float` to 32-bit integer. - static ASMJIT_INLINE int32_t floatAsInt(float f) { FloatBits m; m.f = f; return m.i; } + //! Bit-cast `float` to a 32-bit integer. + static ASMJIT_INLINE int32_t floatAsInt(float f) noexcept { FloatBits m; m.f = f; return m.i; } //! Bit-cast 32-bit integer to `float`. 
- static ASMJIT_INLINE float intAsFloat(int32_t i) { FloatBits m; m.i = i; return m.f; } + static ASMJIT_INLINE float intAsFloat(int32_t i) noexcept { FloatBits m; m.i = i; return m.f; } - //! Bit-cast `double` to 64-bit integer. - static ASMJIT_INLINE int64_t doubleAsInt(double d) { DoubleBits m; m.d = d; return m.i; } + //! Bit-cast `double` to a 64-bit integer. + static ASMJIT_INLINE int64_t doubleAsInt(double d) noexcept { DoubleBits m; m.d = d; return m.i; } //! Bit-cast 64-bit integer to `double`. - static ASMJIT_INLINE double intAsDouble(int64_t i) { DoubleBits m; m.i = i; return m.d; } + static ASMJIT_INLINE double intAsDouble(int64_t i) noexcept { DoubleBits m; m.i = i; return m.d; } // -------------------------------------------------------------------------- // [Pack / Unpack] // -------------------------------------------------------------------------- //! Pack two 8-bit integer and one 16-bit integer into a 32-bit integer as it - //! is an array of `{u0,u1,w2}`. - static ASMJIT_INLINE uint32_t pack32_2x8_1x16(uint32_t u0, uint32_t u1, uint32_t w2) { - return ASMJIT_ARCH_LE ? u0 + (u1 << 8) + (w2 << 16) - : (u0 << 24) + (u1 << 16) + w2; + //! is an array of `{b0,b1,w2}`. + static ASMJIT_INLINE uint32_t pack32_2x8_1x16(uint32_t b0, uint32_t b1, uint32_t w2) noexcept { + return ASMJIT_ARCH_LE ? b0 + (b1 << 8) + (w2 << 16) + : (b0 << 24) + (b1 << 16) + w2; } - //! Pack four 8-bit integer into a 32-bit integer as it is an array of `{u0,u1,u2,u3}`. - static ASMJIT_INLINE uint32_t pack32_4x8(uint32_t u0, uint32_t u1, uint32_t u2, uint32_t u3) { - return ASMJIT_ARCH_LE ? u0 + (u1 << 8) + (u2 << 16) + (u3 << 24) - : (u0 << 24) + (u1 << 16) + (u2 << 8) + u3; + //! Pack four 8-bit integer into a 32-bit integer as it is an array of `{b0,b1,b2,b3}`. + static ASMJIT_INLINE uint32_t pack32_4x8(uint32_t b0, uint32_t b1, uint32_t b2, uint32_t b3) noexcept { + return ASMJIT_ARCH_LE ? 
b0 + (b1 << 8) + (b2 << 16) + (b3 << 24) + : (b0 << 24) + (b1 << 16) + (b2 << 8) + b3; } //! Pack two 32-bit integer into a 64-bit integer as it is an array of `{u0,u1}`. - static ASMJIT_INLINE uint64_t pack64_2x32(uint32_t u0, uint32_t u1) { + static ASMJIT_INLINE uint64_t pack64_2x32(uint32_t u0, uint32_t u1) noexcept { return ASMJIT_ARCH_LE ? (static_cast(u1) << 32) + u0 : (static_cast(u0) << 32) + u1; } + // -------------------------------------------------------------------------- + // [Position of byte (in bit-shift)] + // -------------------------------------------------------------------------- + + static ASMJIT_INLINE uint32_t byteShiftOfDWordStruct(uint32_t index) noexcept { + if (ASMJIT_ARCH_LE) + return index * 8; + else + return (sizeof(uint32_t) - 1 - index) * 8; + } + // -------------------------------------------------------------------------- // [Min/Max] // -------------------------------------------------------------------------- - // NOTE: Because some environments declare min() and max() as macros, it has - // been decided to use different name so we never collide with them. + // Some environments declare `min()` and `max()` as preprocessor macros so it + // was decided to use different names to prevent such collision. //! Get minimum value of `a` and `b`. template - static ASMJIT_INLINE T iMin(const T& a, const T& b) { return a < b ? a : b; } + static ASMJIT_INLINE T iMin(const T& a, const T& b) noexcept { return a < b ? a : b; } //! Get maximum value of `a` and `b`. template - static ASMJIT_INLINE T iMax(const T& a, const T& b) { return a > b ? a : b; } + static ASMJIT_INLINE T iMax(const T& a, const T& b) noexcept { return a > b ? a : b; } // -------------------------------------------------------------------------- // [InInterval] @@ -160,7 +171,7 @@ struct Utils { //! Get whether `x` is greater than or equal to `a` and lesses than or equal to `b`. 
template - static ASMJIT_INLINE bool inInterval(T x, T a, T b) { + static ASMJIT_INLINE bool inInterval(T x, T a, T b) noexcept { return x >= a && x <= b; } @@ -173,7 +184,7 @@ struct Utils { //! arbitrary integer type into a function that accepts either `int` or //! `int64_t`. template - static ASMJIT_INLINE typename IntTraits::IntType asInt(T x) { + static ASMJIT_INLINE typename IntTraits::IntType asInt(T x) noexcept { return static_cast::IntType>(x); } @@ -183,7 +194,7 @@ struct Utils { //! Get whether the given integer `x` can be casted to an 8-bit signed integer. template - static ASMJIT_INLINE bool isInt8(T x) { + static ASMJIT_INLINE bool isInt8(T x) noexcept { typedef typename IntTraits::SignedType SignedType; typedef typename IntTraits::UnsignedType UnsignedType; @@ -195,7 +206,7 @@ struct Utils { //! Get whether the given integer `x` can be casted to a 16-bit signed integer. template - static ASMJIT_INLINE bool isInt16(T x) { + static ASMJIT_INLINE bool isInt16(T x) noexcept { typedef typename IntTraits::SignedType SignedType; typedef typename IntTraits::UnsignedType UnsignedType; @@ -207,7 +218,7 @@ struct Utils { //! Get whether the given integer `x` can be casted to a 32-bit signed integer. template - static ASMJIT_INLINE bool isInt32(T x) { + static ASMJIT_INLINE bool isInt32(T x) noexcept { typedef typename IntTraits::SignedType SignedType; typedef typename IntTraits::UnsignedType UnsignedType; @@ -219,7 +230,7 @@ struct Utils { //! Get whether the given integer `x` can be casted to an 8-bit unsigned integer. template - static ASMJIT_INLINE bool isUInt8(T x) { + static ASMJIT_INLINE bool isUInt8(T x) noexcept { typedef typename IntTraits::SignedType SignedType; typedef typename IntTraits::UnsignedType UnsignedType; @@ -231,7 +242,7 @@ struct Utils { //! Get whether the given integer `x` can be casted to a 12-bit unsigned integer (ARM specific). 
template - static ASMJIT_INLINE bool isUInt12(T x) { + static ASMJIT_INLINE bool isUInt12(T x) noexcept { typedef typename IntTraits::SignedType SignedType; typedef typename IntTraits::UnsignedType UnsignedType; @@ -243,7 +254,7 @@ struct Utils { //! Get whether the given integer `x` can be casted to a 16-bit unsigned integer. template - static ASMJIT_INLINE bool isUInt16(T x) { + static ASMJIT_INLINE bool isUInt16(T x) noexcept { typedef typename IntTraits::SignedType SignedType; typedef typename IntTraits::UnsignedType UnsignedType; @@ -255,7 +266,7 @@ struct Utils { //! Get whether the given integer `x` can be casted to a 32-bit unsigned integer. template - static ASMJIT_INLINE bool isUInt32(T x) { + static ASMJIT_INLINE bool isUInt32(T x) noexcept { typedef typename IntTraits::SignedType SignedType; typedef typename IntTraits::UnsignedType UnsignedType; @@ -271,7 +282,7 @@ struct Utils { //! Get whether the `n` value is a power of two (only one bit is set). template - static ASMJIT_INLINE bool isPowerOf2(T n) { + static ASMJIT_INLINE bool isPowerOf2(T n) noexcept { return n != 0 && (n & (n - 1)) == 0; } @@ -280,54 +291,54 @@ struct Utils { // -------------------------------------------------------------------------- //! Generate a bit-mask that has `x` bit set. - static ASMJIT_INLINE uint32_t mask(uint32_t x) { + static ASMJIT_INLINE uint32_t mask(uint32_t x) noexcept { ASMJIT_ASSERT(x < 32); return static_cast(1) << x; } //! Generate a bit-mask that has `x0` and `x1` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1) { + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1) noexcept { return mask(x0) | mask(x1); } //! Generate a bit-mask that has `x0`, `x1` and `x2` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2) { - return mask(x0) | mask(x1) | mask(x2); + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2) noexcept { + return mask(x0, x1) | mask(x2); } //! 
Generate a bit-mask that has `x0`, `x1`, `x2` and `x3` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3); + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { + return mask(x0, x1) | mask(x2, x3); } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3` and `x4` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) ; + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4) noexcept { + return mask(x0, x1) | mask(x2, x3) | mask(x4); } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4` and `x5` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) ; + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5) noexcept { + return mask(x0, x1) | mask(x2, x3) | mask(x4, x5); } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5` and `x6` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) ; + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6) noexcept { + return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6); } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6` and `x7` bits set. 
- static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) | mask(x7) ; + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { + return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6, x7); } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6`, `x7` and `x8` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) | mask(x7) | mask(x8) ; + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8) noexcept { + return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6, x7) | mask(x8); } //! Generate a bit-mask that has `x0`, `x1`, `x2`, `x3`, `x4`, `x5`, `x6`, `x7`, `x8` and `x9` bits set. - static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8, uint32_t x9) { - return mask(x0) | mask(x1) | mask(x2) | mask(x3) | mask(x4) | mask(x5) | mask(x6) | mask(x7) | mask(x8) | mask(x9) ; + static ASMJIT_INLINE uint32_t mask(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7, uint32_t x8, uint32_t x9) noexcept { + return mask(x0, x1) | mask(x2, x3) | mask(x4, x5) | mask(x6, x7) | mask(x8, x9); } // -------------------------------------------------------------------------- @@ -335,13 +346,10 @@ struct Utils { // -------------------------------------------------------------------------- //! Generate a bit-mask that has `x` most significant bits set. 
- static ASMJIT_INLINE uint32_t bits(uint32_t x) { - // Shifting more bits that the type has has undefined behavior. Everything - // we need is that application shouldn't crash because of that, but the - // content of register after shift is not defined. So in case that the - // requested shift is too large for the type we correct this undefined - // behavior by setting all bits to ones (this is why we generate an overflow - // mask). + static ASMJIT_INLINE uint32_t bits(uint32_t x) noexcept { + // Shifting more bits than the type has results in undefined behavior. In + // such case asmjit trashes the result by ORing with `overflow` mask, which + // discards the undefined value returned by the shift. uint32_t overflow = static_cast( -static_cast(x >= sizeof(uint32_t) * 8)); @@ -353,29 +361,38 @@ struct Utils { // -------------------------------------------------------------------------- //! Get whether `x` has bit `n` set. - static ASMJIT_INLINE bool hasBit(uint32_t x, uint32_t n) { - return (x & (static_cast(1) << n)) != 0; + template + static ASMJIT_INLINE bool hasBit(T x, Index n) noexcept { + return (x & (static_cast(1) << n)) != 0; } // -------------------------------------------------------------------------- // [BitCount] // -------------------------------------------------------------------------- - //! Get count of bits in `x`. - //! - //! Taken from http://graphics.stanford.edu/~seander/bithacks.html . - static ASMJIT_INLINE uint32_t bitCount(uint32_t x) { + static ASMJIT_INLINE uint32_t bitCountSlow(uint32_t x) noexcept { x = x - ((x >> 1) & 0x55555555U); x = (x & 0x33333333U) + ((x >> 2) & 0x33333333U); return (((x + (x >> 4)) & 0x0F0F0F0FU) * 0x01010101U) >> 24; } + //! Get count of bits in `x`. + //! + //! Taken from http://graphics.stanford.edu/~seander/bithacks.html . 
+ static ASMJIT_INLINE uint32_t bitCount(uint32_t x) noexcept { +#if ASMJIT_CC_GCC || ASMJIT_CC_CLANG + return __builtin_popcount(x); +#else + return bitCountSlow(x); +#endif + } + // -------------------------------------------------------------------------- // [FindFirstBit] // -------------------------------------------------------------------------- //! \internal - static ASMJIT_INLINE uint32_t findFirstBitSlow(uint32_t mask) { + static ASMJIT_INLINE uint32_t findFirstBitSlow(uint32_t mask) noexcept { // This is a reference (slow) implementation of `findFirstBit()`, used when // we don't have a C++ compiler support. The implementation speed has been // improved to check for 2 bits per iteration. @@ -394,14 +411,19 @@ struct Utils { } //! Find a first bit in `mask`. - static ASMJIT_INLINE uint32_t findFirstBit(uint32_t mask) { -#if defined(_MSC_VER) && _MSC_VER >= 1400 + static ASMJIT_INLINE uint32_t findFirstBit(uint32_t mask) noexcept { +#if ASMJIT_CC_MSC_GE(14, 0, 0) && (ASMJIT_ARCH_X86 || ASMJIT_ARCH_ARM32 || \ + ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64) DWORD i; - if (_BitScanForward(&i, mask)) { - ASMJIT_ASSERT(findFirstBitSlow(mask) == i); + if (_BitScanForward(&i, mask)) return static_cast(i); - } - return 0xFFFFFFFFU; + else + return 0xFFFFFFFFU; +#elif ASMJIT_CC_GCC_GE(3, 4, 6) || ASMJIT_CC_CLANG + if (mask) + return __builtin_ctz(mask); + else + return 0xFFFFFFFFU; #else return findFirstBitSlow(mask); #endif @@ -411,7 +433,7 @@ struct Utils { // [Misc] // -------------------------------------------------------------------------- - static ASMJIT_INLINE uint32_t keepNOnesFromRight(uint32_t mask, uint32_t nBits) { + static ASMJIT_INLINE uint32_t keepNOnesFromRight(uint32_t mask, uint32_t nBits) noexcept { uint32_t m = 0x1; do { @@ -427,7 +449,7 @@ struct Utils { return mask; } - static ASMJIT_INLINE uint32_t indexNOnesFromRight(uint8_t* dst, uint32_t mask, uint32_t nBits) { + static ASMJIT_INLINE uint32_t indexNOnesFromRight(uint8_t* dst, uint32_t mask, 
uint32_t nBits) noexcept { uint32_t totalBits = nBits; uint8_t i = 0; uint32_t m = 0x1; @@ -451,18 +473,18 @@ struct Utils { // -------------------------------------------------------------------------- template - static ASMJIT_INLINE bool isAligned(T base, T alignment) { + static ASMJIT_INLINE bool isAligned(T base, T alignment) noexcept { return (base % alignment) == 0; } //! Align `base` to `alignment`. template - static ASMJIT_INLINE T alignTo(T base, T alignment) { + static ASMJIT_INLINE T alignTo(T base, T alignment) noexcept { return (base + (alignment - 1)) & ~(alignment - 1); } template - static ASMJIT_INLINE T alignToPowerOf2(T base) { + static ASMJIT_INLINE T alignToPowerOf2(T base) noexcept { // Implementation is from "Hacker's Delight" by Henry S. Warren, Jr. base -= 1; @@ -490,7 +512,7 @@ struct Utils { //! Get delta required to align `base` to `alignment`. template - static ASMJIT_INLINE T alignDiff(T base, T alignment) { + static ASMJIT_INLINE T alignDiff(T base, T alignment) noexcept { return alignTo(base, alignment) - base; } @@ -498,7 +520,7 @@ struct Utils { // [String] // -------------------------------------------------------------------------- - static ASMJIT_INLINE size_t strLen(const char* s, size_t maxlen) { + static ASMJIT_INLINE size_t strLen(const char* s, size_t maxlen) noexcept { size_t i; for (i = 0; i < maxlen; i++) if (!s[i]) @@ -507,11 +529,443 @@ struct Utils { } // -------------------------------------------------------------------------- - // [CpuTicks] + // [BSwap] + // -------------------------------------------------------------------------- + + static ASMJIT_INLINE uint32_t bswap32(uint32_t x) noexcept { +#if ASMJIT_CC_MSC + return static_cast(_byteswap_ulong(x)); +#elif ASMJIT_CC_GCC_GE(4, 3, 0) || ASMJIT_CC_CLANG_GE(2, 6, 0) + return __builtin_bswap32(x); +#else + uint32_t y = x & 0x00FFFF00U; + x = (x << 24) + (x >> 24); + y = (y << 8) + (y >> 8); + return x + (y & 0x00FFFF00U); +#endif + } + + // 
-------------------------------------------------------------------------- + // [ReadMem] + // -------------------------------------------------------------------------- + + static ASMJIT_INLINE uint32_t readU8(const void* p) noexcept { + return static_cast(static_cast(p)[0]); + } + + static ASMJIT_INLINE int32_t readI8(const void* p) noexcept { + return static_cast(static_cast(p)[0]); + } + + template + static ASMJIT_INLINE uint32_t readU16xLE(const void* p) noexcept { + if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { + return static_cast(static_cast(p)[0]); + } + else { + uint32_t x = static_cast(static_cast(p)[0]); + uint32_t y = static_cast(static_cast(p)[1]); + return x + (y << 8); + } + } + + template + static ASMJIT_INLINE uint32_t readU16xBE(const void* p) noexcept { + if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { + return static_cast(static_cast(p)[0]); + } + else { + uint32_t x = static_cast(static_cast(p)[0]); + uint32_t y = static_cast(static_cast(p)[1]); + return (x << 8) + y; + } + } + + template + static ASMJIT_INLINE uint32_t readU16x(const void* p) noexcept { + return ASMJIT_ARCH_LE ? readU16xLE(p) : readU16xBE(p); + } + + template + static ASMJIT_INLINE int32_t readI16xLE(const void* p) noexcept { + if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { + return static_cast(static_cast(p)[0]); + } + else { + int32_t x = static_cast(static_cast(p)[0]); + int32_t y = static_cast(static_cast(p)[1]); + return x + (y << 8); + } + } + + template + static ASMJIT_INLINE int32_t readI16xBE(const void* p) noexcept { + if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { + return static_cast(static_cast(p)[0]); + } + else { + int32_t x = static_cast(static_cast(p)[0]); + int32_t y = static_cast(static_cast(p)[1]); + return (x << 8) + y; + } + } + + template + static ASMJIT_INLINE int32_t readI16x(const void* p) noexcept { + return ASMJIT_ARCH_LE ? 
readI16xLE(p) : readI16xBE(p); + } + + static ASMJIT_INLINE uint32_t readU16aLE(const void* p) noexcept { return readU16xLE<2>(p); } + static ASMJIT_INLINE uint32_t readU16uLE(const void* p) noexcept { return readU16xLE<0>(p); } + + static ASMJIT_INLINE uint32_t readU16aBE(const void* p) noexcept { return readU16xBE<2>(p); } + static ASMJIT_INLINE uint32_t readU16uBE(const void* p) noexcept { return readU16xBE<0>(p); } + + static ASMJIT_INLINE uint32_t readU16a(const void* p) noexcept { return readU16x<2>(p); } + static ASMJIT_INLINE uint32_t readU16u(const void* p) noexcept { return readU16x<0>(p); } + + static ASMJIT_INLINE int32_t readI16aLE(const void* p) noexcept { return readI16xLE<2>(p); } + static ASMJIT_INLINE int32_t readI16uLE(const void* p) noexcept { return readI16xLE<0>(p); } + + static ASMJIT_INLINE int32_t readI16aBE(const void* p) noexcept { return readI16xBE<2>(p); } + static ASMJIT_INLINE int32_t readI16uBE(const void* p) noexcept { return readI16xBE<0>(p); } + + static ASMJIT_INLINE int32_t readI16a(const void* p) noexcept { return readI16x<2>(p); } + static ASMJIT_INLINE int32_t readI16u(const void* p) noexcept { return readI16x<0>(p); } + + template + static ASMJIT_INLINE uint32_t readU32xLE(const void* p) noexcept { + if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { + uint32_t x = static_cast(p)[0]; + return ASMJIT_ARCH_LE ? x : bswap32(x); + } + else { + uint32_t x = readU16xLE(static_cast(p) + 0); + uint32_t y = readU16xLE(static_cast(p) + 2); + return x + (y << 16); + } + } + + template + static ASMJIT_INLINE uint32_t readU32xBE(const void* p) noexcept { + if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { + uint32_t x = static_cast(p)[0]; + return ASMJIT_ARCH_BE ? x : bswap32(x); + } + else { + uint32_t x = readU16xBE(static_cast(p) + 0); + uint32_t y = readU16xBE(static_cast(p) + 2); + return (x << 16) + y; + } + } + + template + static ASMJIT_INLINE uint32_t readU32x(const void* p) noexcept { + return ASMJIT_ARCH_LE ? 
readU32xLE(p) : readU32xBE(p); + } + + template + static ASMJIT_INLINE int32_t readI32xLE(const void* p) noexcept { + return static_cast(readU32xLE(p)); + } + + template + static ASMJIT_INLINE int32_t readI32xBE(const void* p) noexcept { + return static_cast(readU32xBE(p)); + } + + template + static ASMJIT_INLINE int32_t readI32x(const void* p) noexcept { + return ASMJIT_ARCH_LE ? readI32xLE(p) : readI32xBE(p); + } + + static ASMJIT_INLINE uint32_t readU32a(const void* p) noexcept { return readU32x<4>(p); } + static ASMJIT_INLINE uint32_t readU32u(const void* p) noexcept { return readU32x<0>(p); } + + static ASMJIT_INLINE uint32_t readU32aLE(const void* p) noexcept { return readU32xLE<4>(p); } + static ASMJIT_INLINE uint32_t readU32uLE(const void* p) noexcept { return readU32xLE<0>(p); } + + static ASMJIT_INLINE uint32_t readU32aBE(const void* p) noexcept { return readU32xBE<4>(p); } + static ASMJIT_INLINE uint32_t readU32uBE(const void* p) noexcept { return readU32xBE<0>(p); } + + static ASMJIT_INLINE int32_t readI32a(const void* p) noexcept { return readI32x<4>(p); } + static ASMJIT_INLINE int32_t readI32u(const void* p) noexcept { return readI32x<0>(p); } + + static ASMJIT_INLINE int32_t readI32aLE(const void* p) noexcept { return readI32xLE<4>(p); } + static ASMJIT_INLINE int32_t readI32uLE(const void* p) noexcept { return readI32xLE<0>(p); } + + static ASMJIT_INLINE int32_t readI32aBE(const void* p) noexcept { return readI32xBE<4>(p); } + static ASMJIT_INLINE int32_t readI32uBE(const void* p) noexcept { return readI32xBE<0>(p); } + + template + static ASMJIT_INLINE uint64_t readU64xLE(const void* p) noexcept { + if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { + return static_cast(p)[0]; + } + else { + uint32_t x = readU32xLE(static_cast(p) + 0); + uint32_t y = readU32xLE(static_cast(p) + 4); + return static_cast(x) + (static_cast(y) << 32); + } + } + + template + static ASMJIT_INLINE uint64_t readU64xBE(const void* p) noexcept { + if 
(ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { + return static_cast(p)[0]; + } + else { + uint32_t x = readU32xLE(static_cast(p) + 0); + uint32_t y = readU32xLE(static_cast(p) + 4); + return (static_cast(x) << 32) + static_cast(y); + } + } + + template + static ASMJIT_INLINE uint64_t readU64x(const void* p) noexcept { + return ASMJIT_ARCH_LE ? readU64xLE(p) : readU64xBE(p); + } + + template + static ASMJIT_INLINE int64_t readI64xLE(const void* p) noexcept { + return static_cast(readU64xLE(p)); + } + + template + static ASMJIT_INLINE int64_t readI64xBE(const void* p) noexcept { + return static_cast(readU64xBE(p)); + } + + template + static ASMJIT_INLINE int64_t readI64x(const void* p) noexcept { + return ASMJIT_ARCH_LE ? readI64xLE(p) : readI64xBE(p); + } + + static ASMJIT_INLINE uint64_t readU64a(const void* p) noexcept { return readU64x<8>(p); } + static ASMJIT_INLINE uint64_t readU64u(const void* p) noexcept { return readU64x<0>(p); } + + static ASMJIT_INLINE uint64_t readU64aLE(const void* p) noexcept { return readU64xLE<8>(p); } + static ASMJIT_INLINE uint64_t readU64uLE(const void* p) noexcept { return readU64xLE<0>(p); } + + static ASMJIT_INLINE uint64_t readU64aBE(const void* p) noexcept { return readU64xBE<8>(p); } + static ASMJIT_INLINE uint64_t readU64uBE(const void* p) noexcept { return readU64xBE<0>(p); } + + static ASMJIT_INLINE int64_t readI64a(const void* p) noexcept { return readI64x<8>(p); } + static ASMJIT_INLINE int64_t readI64u(const void* p) noexcept { return readI64x<0>(p); } + + static ASMJIT_INLINE int64_t readI64aLE(const void* p) noexcept { return readI64xLE<8>(p); } + static ASMJIT_INLINE int64_t readI64uLE(const void* p) noexcept { return readI64xLE<0>(p); } + + static ASMJIT_INLINE int64_t readI64aBE(const void* p) noexcept { return readI64xBE<8>(p); } + static ASMJIT_INLINE int64_t readI64uBE(const void* p) noexcept { return readI64xBE<0>(p); } + + // 
-------------------------------------------------------------------------- + // [WriteMem] + // -------------------------------------------------------------------------- + + static ASMJIT_INLINE void writeU8(void* p, uint32_t x) noexcept { + static_cast(p)[0] = static_cast(x & 0xFFU); + } + + static ASMJIT_INLINE void writeI8(void* p, int32_t x) noexcept { + static_cast(p)[0] = static_cast(x & 0xFF); + } + + template + static ASMJIT_INLINE void writeU16xLE(void* p, uint32_t x) noexcept { + if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { + static_cast(p)[0] = static_cast(x & 0xFFFFU); + } + else { + static_cast(p)[0] = static_cast((x ) & 0xFFU); + static_cast(p)[1] = static_cast((x >> 8) & 0xFFU); + } + } + + template + static ASMJIT_INLINE void writeU16xBE(void* p, uint32_t x) noexcept { + if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_16 || Alignment >= 2)) { + static_cast(p)[0] = static_cast(x & 0xFFFFU); + } + else { + static_cast(p)[0] = static_cast((x >> 8) & 0xFFU); + static_cast(p)[1] = static_cast((x ) & 0xFFU); + } + } + + template + static ASMJIT_INLINE void writeU16x(void* p, uint32_t x) noexcept { + if (ASMJIT_ARCH_LE) + writeU16xLE(p, x); + else + writeU16xBE(p, x); + } + + template + static ASMJIT_INLINE void writeI16xLE(void* p, int32_t x) noexcept { + writeU16xLE(p, static_cast(x)); + } + + template + static ASMJIT_INLINE void writeI16xBE(void* p, int32_t x) noexcept { + writeU16xBE(p, static_cast(x)); + } + + template + static ASMJIT_INLINE void writeI16x(void* p, int32_t x) noexcept { + writeU16x(p, static_cast(x)); + } + + static ASMJIT_INLINE void writeU16aLE(void* p, uint32_t x) noexcept { writeU16xLE<2>(p, x); } + static ASMJIT_INLINE void writeU16uLE(void* p, uint32_t x) noexcept { writeU16xLE<0>(p, x); } + + static ASMJIT_INLINE void writeU16aBE(void* p, uint32_t x) noexcept { writeU16xBE<2>(p, x); } + static ASMJIT_INLINE void writeU16uBE(void* p, uint32_t x) noexcept { writeU16xBE<0>(p, x); } + + static ASMJIT_INLINE 
void writeU16a(void* p, uint32_t x) noexcept { writeU16x<2>(p, x); } + static ASMJIT_INLINE void writeU16u(void* p, uint32_t x) noexcept { writeU16x<0>(p, x); } + + static ASMJIT_INLINE void writeI16aLE(void* p, int32_t x) noexcept { writeI16xLE<2>(p, x); } + static ASMJIT_INLINE void writeI16uLE(void* p, int32_t x) noexcept { writeI16xLE<0>(p, x); } + + static ASMJIT_INLINE void writeI16aBE(void* p, int32_t x) noexcept { writeI16xBE<2>(p, x); } + static ASMJIT_INLINE void writeI16uBE(void* p, int32_t x) noexcept { writeI16xBE<0>(p, x); } + + static ASMJIT_INLINE void writeI16a(void* p, int32_t x) noexcept { writeI16x<2>(p, x); } + static ASMJIT_INLINE void writeI16u(void* p, int32_t x) noexcept { writeI16x<0>(p, x); } + + template + static ASMJIT_INLINE void writeU32xLE(void* p, uint32_t x) noexcept { + if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { + static_cast(p)[0] = ASMJIT_ARCH_LE ? x : bswap32(x); + } + else { + writeU16xLE(static_cast(p) + 0, x >> 16); + writeU16xLE(static_cast(p) + 2, x); + } + } + + template + static ASMJIT_INLINE void writeU32xBE(void* p, uint32_t x) noexcept { + if (ASMJIT_ARCH_UNALIGNED_32 || Alignment >= 4) { + static_cast(p)[0] = ASMJIT_ARCH_BE ? 
x : bswap32(x); + } + else { + writeU16xBE(static_cast(p) + 0, x); + writeU16xBE(static_cast(p) + 2, x >> 16); + } + } + + template + static ASMJIT_INLINE void writeU32x(void* p, uint32_t x) noexcept { + if (ASMJIT_ARCH_LE) + writeU32xLE(p, x); + else + writeU32xBE(p, x); + } + + template + static ASMJIT_INLINE void writeI32xLE(void* p, int32_t x) noexcept { + writeU32xLE(p, static_cast(x)); + } + + template + static ASMJIT_INLINE void writeI32xBE(void* p, int32_t x) noexcept { + writeU32xBE(p, static_cast(x)); + } + + template + static ASMJIT_INLINE void writeI32x(void* p, int32_t x) noexcept { + writeU32x(p, static_cast(x)); + } + + static ASMJIT_INLINE void writeU32aLE(void* p, uint32_t x) noexcept { writeU32xLE<4>(p, x); } + static ASMJIT_INLINE void writeU32uLE(void* p, uint32_t x) noexcept { writeU32xLE<0>(p, x); } + + static ASMJIT_INLINE void writeU32aBE(void* p, uint32_t x) noexcept { writeU32xBE<4>(p, x); } + static ASMJIT_INLINE void writeU32uBE(void* p, uint32_t x) noexcept { writeU32xBE<0>(p, x); } + + static ASMJIT_INLINE void writeU32a(void* p, uint32_t x) noexcept { writeU32x<4>(p, x); } + static ASMJIT_INLINE void writeU32u(void* p, uint32_t x) noexcept { writeU32x<0>(p, x); } + + static ASMJIT_INLINE void writeI32aLE(void* p, int32_t x) noexcept { writeI32xLE<4>(p, x); } + static ASMJIT_INLINE void writeI32uLE(void* p, int32_t x) noexcept { writeI32xLE<0>(p, x); } + + static ASMJIT_INLINE void writeI32aBE(void* p, int32_t x) noexcept { writeI32xBE<4>(p, x); } + static ASMJIT_INLINE void writeI32uBE(void* p, int32_t x) noexcept { writeI32xBE<0>(p, x); } + + static ASMJIT_INLINE void writeI32a(void* p, int32_t x) noexcept { writeI32x<4>(p, x); } + static ASMJIT_INLINE void writeI32u(void* p, int32_t x) noexcept { writeI32x<0>(p, x); } + + template + static ASMJIT_INLINE void writeU64xLE(void* p, uint64_t x) noexcept { + if (ASMJIT_ARCH_LE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { + static_cast(p)[0] = x; + } + else { + 
writeU32xLE(static_cast(p) + 0, static_cast(x >> 32)); + writeU32xLE(static_cast(p) + 4, static_cast(x & 0xFFFFFFFFU)); + } + } + + template + static ASMJIT_INLINE void writeU64xBE(void* p, uint64_t x) noexcept { + if (ASMJIT_ARCH_BE && (ASMJIT_ARCH_UNALIGNED_64 || Alignment >= 8)) { + static_cast(p)[0] = x; + } + else { + writeU32xBE(static_cast(p) + 0, static_cast(x & 0xFFFFFFFFU)); + writeU32xBE(static_cast(p) + 4, static_cast(x >> 32)); + } + } + + template + static ASMJIT_INLINE void writeU64x(void* p, uint64_t x) noexcept { + if (ASMJIT_ARCH_LE) + writeU64xLE(p, x); + else + writeU64xBE(p, x); + } + + template + static ASMJIT_INLINE void writeI64xLE(void* p, int64_t x) noexcept { + writeU64xLE(p, static_cast(x)); + } + + template + static ASMJIT_INLINE void writeI64xBE(void* p, int64_t x) noexcept { + writeU64xBE(p, static_cast(x)); + } + + template + static ASMJIT_INLINE void writeI64x(void* p, int64_t x) noexcept { + writeU64x(p, static_cast(x)); + } + + static ASMJIT_INLINE void writeU64aLE(void* p, uint64_t x) noexcept { writeU64xLE<8>(p, x); } + static ASMJIT_INLINE void writeU64uLE(void* p, uint64_t x) noexcept { writeU64xLE<0>(p, x); } + + static ASMJIT_INLINE void writeU64aBE(void* p, uint64_t x) noexcept { writeU64xBE<8>(p, x); } + static ASMJIT_INLINE void writeU64uBE(void* p, uint64_t x) noexcept { writeU64xBE<0>(p, x); } + + static ASMJIT_INLINE void writeU64a(void* p, uint64_t x) noexcept { writeU64x<8>(p, x); } + static ASMJIT_INLINE void writeU64u(void* p, uint64_t x) noexcept { writeU64x<0>(p, x); } + + static ASMJIT_INLINE void writeI64aLE(void* p, int64_t x) noexcept { writeI64xLE<8>(p, x); } + static ASMJIT_INLINE void writeI64uLE(void* p, int64_t x) noexcept { writeI64xLE<0>(p, x); } + + static ASMJIT_INLINE void writeI64aBE(void* p, int64_t x) noexcept { writeI64xBE<8>(p, x); } + static ASMJIT_INLINE void writeI64uBE(void* p, int64_t x) noexcept { writeI64xBE<0>(p, x); } + + static ASMJIT_INLINE void writeI64a(void* p, int64_t x) noexcept 
{ writeI64x<8>(p, x); } + static ASMJIT_INLINE void writeI64u(void* p, int64_t x) noexcept { writeI64x<0>(p, x); } + + // -------------------------------------------------------------------------- + // [GetTickCount] // -------------------------------------------------------------------------- //! Get the current CPU tick count, used for benchmarking (1ms resolution). - static ASMJIT_API uint32_t getTickCount(); + static ASMJIT_API uint32_t getTickCount() noexcept; }; // ============================================================================ @@ -523,13 +977,13 @@ union UInt64 { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE UInt64 fromUInt64(uint64_t val) { + ASMJIT_INLINE UInt64 fromUInt64(uint64_t val) noexcept { UInt64 data; data.setUInt64(val); return data; } - ASMJIT_INLINE UInt64 fromUInt64(const UInt64& val) { + ASMJIT_INLINE UInt64 fromUInt64(const UInt64& val) noexcept { UInt64 data; data.setUInt64(val); return data; @@ -539,8 +993,8 @@ union UInt64 { // [Reset] // -------------------------------------------------------------------------- - ASMJIT_INLINE void reset() { - if (kArchHost64Bit) { + ASMJIT_INLINE void reset() noexcept { + if (ASMJIT_ARCH_64BIT) { u64 = 0; } else { @@ -553,17 +1007,17 @@ union UInt64 { // [Accessors] // -------------------------------------------------------------------------- - ASMJIT_INLINE uint64_t getUInt64() const { + ASMJIT_INLINE uint64_t getUInt64() const noexcept { return u64; } - ASMJIT_INLINE UInt64& setUInt64(uint64_t val) { + ASMJIT_INLINE UInt64& setUInt64(uint64_t val) noexcept { u64 = val; return *this; } - ASMJIT_INLINE UInt64& setUInt64(const UInt64& val) { - if (kArchHost64Bit) { + ASMJIT_INLINE UInt64& setUInt64(const UInt64& val) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 = val.u64; } else { @@ -573,8 +1027,8 @@ union UInt64 { return *this; } - ASMJIT_INLINE UInt64& setPacked_2x32(uint32_t u0, uint32_t u1) { - if 
(kArchHost64Bit) { + ASMJIT_INLINE UInt64& setPacked_2x32(uint32_t u0, uint32_t u1) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 = Utils::pack64_2x32(u0, u1); } else { @@ -588,13 +1042,13 @@ union UInt64 { // [Add] // -------------------------------------------------------------------------- - ASMJIT_INLINE UInt64& add(uint64_t val) { + ASMJIT_INLINE UInt64& add(uint64_t val) noexcept { u64 += val; return *this; } - ASMJIT_INLINE UInt64& add(const UInt64& val) { - if (kArchHost64Bit) { + ASMJIT_INLINE UInt64& add(const UInt64& val) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 += val.u64; } else { @@ -608,13 +1062,13 @@ union UInt64 { // [Sub] // -------------------------------------------------------------------------- - ASMJIT_INLINE UInt64& sub(uint64_t val) { + ASMJIT_INLINE UInt64& sub(uint64_t val) noexcept { u64 -= val; return *this; } - ASMJIT_INLINE UInt64& sub(const UInt64& val) { - if (kArchHost64Bit) { + ASMJIT_INLINE UInt64& sub(const UInt64& val) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 -= val.u64; } else { @@ -628,13 +1082,13 @@ union UInt64 { // [And] // -------------------------------------------------------------------------- - ASMJIT_INLINE UInt64& and_(uint64_t val) { + ASMJIT_INLINE UInt64& and_(uint64_t val) noexcept { u64 &= val; return *this; } - ASMJIT_INLINE UInt64& and_(const UInt64& val) { - if (kArchHost64Bit) { + ASMJIT_INLINE UInt64& and_(const UInt64& val) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 &= val.u64; } else { @@ -648,13 +1102,13 @@ union UInt64 { // [AndNot] // -------------------------------------------------------------------------- - ASMJIT_INLINE UInt64& andNot(uint64_t val) { + ASMJIT_INLINE UInt64& andNot(uint64_t val) noexcept { u64 &= ~val; return *this; } - ASMJIT_INLINE UInt64& andNot(const UInt64& val) { - if (kArchHost64Bit) { + ASMJIT_INLINE UInt64& andNot(const UInt64& val) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 &= ~val.u64; } else { @@ -668,13 +1122,13 @@ union UInt64 { // [Or] // 
-------------------------------------------------------------------------- - ASMJIT_INLINE UInt64& or_(uint64_t val) { + ASMJIT_INLINE UInt64& or_(uint64_t val) noexcept { u64 |= val; return *this; } - ASMJIT_INLINE UInt64& or_(const UInt64& val) { - if (kArchHost64Bit) { + ASMJIT_INLINE UInt64& or_(const UInt64& val) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 |= val.u64; } else { @@ -688,13 +1142,13 @@ union UInt64 { // [Xor] // -------------------------------------------------------------------------- - ASMJIT_INLINE UInt64& xor_(uint64_t val) { + ASMJIT_INLINE UInt64& xor_(uint64_t val) noexcept { u64 ^= val; return *this; } - ASMJIT_INLINE UInt64& xor_(const UInt64& val) { - if (kArchHost64Bit) { + ASMJIT_INLINE UInt64& xor_(const UInt64& val) noexcept { + if (ASMJIT_ARCH_64BIT) { u64 ^= val.u64; } else { @@ -708,58 +1162,67 @@ union UInt64 { // [Eq] // -------------------------------------------------------------------------- - ASMJIT_INLINE bool isZero() const { - return kArchHost64Bit ? u64 == 0 : (u32[0] | u32[1]) == 0; + ASMJIT_INLINE bool isZero() const noexcept { + if (ASMJIT_ARCH_64BIT) + return u64 == 0; + else + return (u32[0] | u32[1]) == 0; } - ASMJIT_INLINE bool isNonZero() const { - return kArchHost64Bit ? u64 != 0 : (u32[0] | u32[1]) != 0; + ASMJIT_INLINE bool isNonZero() const noexcept { + if (ASMJIT_ARCH_64BIT) + return u64 != 0; + else + return (u32[0] | u32[1]) != 0; } - ASMJIT_INLINE bool eq(uint64_t val) const { + ASMJIT_INLINE bool eq(uint64_t val) const noexcept { return u64 == val; } - ASMJIT_INLINE bool eq(const UInt64& val) const { - return kArchHost64Bit ? 
u64 == val.u64 : (u32[0] == val.u32[0]) & (u32[1] == val.u32[1]); + ASMJIT_INLINE bool eq(const UInt64& val) const noexcept { + if (ASMJIT_ARCH_64BIT) + return u64 == val.u64; + else + return u32[0] == val.u32[0] && u32[1] == val.u32[1]; } // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE UInt64& operator+=(uint64_t val) { return add(val); } - ASMJIT_INLINE UInt64& operator+=(const UInt64& val) { return add(val); } + ASMJIT_INLINE UInt64& operator+=(uint64_t val) noexcept { return add(val); } + ASMJIT_INLINE UInt64& operator+=(const UInt64& val) noexcept { return add(val); } - ASMJIT_INLINE UInt64& operator-=(uint64_t val) { return sub(val); } - ASMJIT_INLINE UInt64& operator-=(const UInt64& val) { return sub(val); } + ASMJIT_INLINE UInt64& operator-=(uint64_t val) noexcept { return sub(val); } + ASMJIT_INLINE UInt64& operator-=(const UInt64& val) noexcept { return sub(val); } - ASMJIT_INLINE UInt64& operator&=(uint64_t val) { return and_(val); } - ASMJIT_INLINE UInt64& operator&=(const UInt64& val) { return and_(val); } + ASMJIT_INLINE UInt64& operator&=(uint64_t val) noexcept { return and_(val); } + ASMJIT_INLINE UInt64& operator&=(const UInt64& val) noexcept { return and_(val); } - ASMJIT_INLINE UInt64& operator|=(uint64_t val) { return or_(val); } - ASMJIT_INLINE UInt64& operator|=(const UInt64& val) { return or_(val); } + ASMJIT_INLINE UInt64& operator|=(uint64_t val) noexcept { return or_(val); } + ASMJIT_INLINE UInt64& operator|=(const UInt64& val) noexcept { return or_(val); } - ASMJIT_INLINE UInt64& operator^=(uint64_t val) { return xor_(val); } - ASMJIT_INLINE UInt64& operator^=(const UInt64& val) { return xor_(val); } + ASMJIT_INLINE UInt64& operator^=(uint64_t val) noexcept { return xor_(val); } + ASMJIT_INLINE UInt64& operator^=(const UInt64& val) noexcept { return xor_(val); } - ASMJIT_INLINE bool 
operator==(uint64_t val) const { return eq(val); } - ASMJIT_INLINE bool operator==(const UInt64& val) const { return eq(val); } + ASMJIT_INLINE bool operator==(uint64_t val) const noexcept { return eq(val); } + ASMJIT_INLINE bool operator==(const UInt64& val) const noexcept { return eq(val); } - ASMJIT_INLINE bool operator!=(uint64_t val) const { return !eq(val); } - ASMJIT_INLINE bool operator!=(const UInt64& val) const { return !eq(val); } + ASMJIT_INLINE bool operator!=(uint64_t val) const noexcept { return !eq(val); } + ASMJIT_INLINE bool operator!=(const UInt64& val) const noexcept { return !eq(val); } - ASMJIT_INLINE bool operator<(uint64_t val) const { return u64 < val; } - ASMJIT_INLINE bool operator<(const UInt64& val) const { return u64 < val.u64; } + ASMJIT_INLINE bool operator<(uint64_t val) const noexcept { return u64 < val; } + ASMJIT_INLINE bool operator<(const UInt64& val) const noexcept { return u64 < val.u64; } - ASMJIT_INLINE bool operator<=(uint64_t val) const { return u64 <= val; } - ASMJIT_INLINE bool operator<=(const UInt64& val) const { return u64 <= val.u64; } + ASMJIT_INLINE bool operator<=(uint64_t val) const noexcept { return u64 <= val; } + ASMJIT_INLINE bool operator<=(const UInt64& val) const noexcept { return u64 <= val.u64; } - ASMJIT_INLINE bool operator>(uint64_t val) const { return u64 > val; } - ASMJIT_INLINE bool operator>(const UInt64& val) const { return u64 > val.u64; } + ASMJIT_INLINE bool operator>(uint64_t val) const noexcept { return u64 > val; } + ASMJIT_INLINE bool operator>(const UInt64& val) const noexcept { return u64 > val.u64; } - ASMJIT_INLINE bool operator>=(uint64_t val) const { return u64 >= val; } - ASMJIT_INLINE bool operator>=(const UInt64& val) const { return u64 >= val.u64; } + ASMJIT_INLINE bool operator>=(uint64_t val) const noexcept { return u64 >= val; } + ASMJIT_INLINE bool operator>=(const UInt64& val) const noexcept { return u64 >= val.u64; } // 
-------------------------------------------------------------------------- // [Members] @@ -799,14 +1262,14 @@ struct Lock { typedef CRITICAL_SECTION Handle; //! Create a new `Lock` instance. - ASMJIT_INLINE Lock() { InitializeCriticalSection(&_handle); } + ASMJIT_INLINE Lock() noexcept { InitializeCriticalSection(&_handle); } //! Destroy the `Lock` instance. - ASMJIT_INLINE ~Lock() { DeleteCriticalSection(&_handle); } + ASMJIT_INLINE ~Lock() noexcept { DeleteCriticalSection(&_handle); } //! Lock. - ASMJIT_INLINE void lock() { EnterCriticalSection(&_handle); } + ASMJIT_INLINE void lock() noexcept { EnterCriticalSection(&_handle); } //! Unlock. - ASMJIT_INLINE void unlock() { LeaveCriticalSection(&_handle); } + ASMJIT_INLINE void unlock() noexcept { LeaveCriticalSection(&_handle); } #endif // ASMJIT_OS_WINDOWS // -------------------------------------------------------------------------- @@ -817,14 +1280,14 @@ struct Lock { typedef pthread_mutex_t Handle; //! Create a new `Lock` instance. - ASMJIT_INLINE Lock() { pthread_mutex_init(&_handle, nullptr); } + ASMJIT_INLINE Lock() noexcept { pthread_mutex_init(&_handle, nullptr); } //! Destroy the `Lock` instance. - ASMJIT_INLINE ~Lock() { pthread_mutex_destroy(&_handle); } + ASMJIT_INLINE ~Lock() noexcept { pthread_mutex_destroy(&_handle); } //! Lock. - ASMJIT_INLINE void lock() { pthread_mutex_lock(&_handle); } + ASMJIT_INLINE void lock() noexcept { pthread_mutex_lock(&_handle); } //! Unlock. - ASMJIT_INLINE void unlock() { pthread_mutex_unlock(&_handle); } + ASMJIT_INLINE void unlock() noexcept { pthread_mutex_unlock(&_handle); } #endif // ASMJIT_OS_POSIX // -------------------------------------------------------------------------- @@ -849,10 +1312,13 @@ struct AutoLock { // [Construction / Destruction] // -------------------------------------------------------------------------- - //! Lock `target`, scoped. - ASMJIT_INLINE AutoLock(Lock& target) : _target(target) { _target.lock(); } - //! Unlock `target`. 
- ASMJIT_INLINE ~AutoLock() { _target.unlock(); } + ASMJIT_INLINE AutoLock(Lock& target) noexcept : _target(target) { + _target.lock(); + } + + ASMJIT_INLINE ~AutoLock() noexcept { + _target.unlock(); + } // -------------------------------------------------------------------------- // [Members] diff --git a/src/asmjit/base/vectypes.h b/src/asmjit/base/vectypes.h index 0e160aa..14bebd0 100644 --- a/src/asmjit/base/vectypes.h +++ b/src/asmjit/base/vectypes.h @@ -30,155 +30,125 @@ union Vec64 { // -------------------------------------------------------------------------- //! Set all eight 8-bit signed integers. - static ASMJIT_INLINE Vec64 fromSb( - int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) - { + static ASMJIT_INLINE Vec64 fromSB(int8_t x0) noexcept { Vec64 self; - self.setSb(x0, x1, x2, x3, x4, x5, x6, x7); + self.setSB(x0); + return self; + } + + //! Set all eight 8-bit unsigned integers. + static ASMJIT_INLINE Vec64 fromUB(uint8_t x0) noexcept { + Vec64 self; + self.setUB(x0); return self; } //! Set all eight 8-bit signed integers. - static ASMJIT_INLINE Vec64 fromSb( - int8_t x0) - { + static ASMJIT_INLINE Vec64 fromSB( + int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept { + Vec64 self; - self.setSb(x0); + self.setSB(x0, x1, x2, x3, x4, x5, x6, x7); return self; } //! Set all eight 8-bit unsigned integers. - static ASMJIT_INLINE Vec64 fromUb( - uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) - { - Vec64 self; - self.setUb(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } + static ASMJIT_INLINE Vec64 fromUB( + uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept { - //! Set all eight 8-bit unsigned integers. - static ASMJIT_INLINE Vec64 fromUb( - uint8_t x0) - { Vec64 self; - self.setUb(x0); + self.setUB(x0, x1, x2, x3, x4, x5, x6, x7); return self; } //! 
Set all four 16-bit signed integers. - static ASMJIT_INLINE Vec64 fromSw( - int16_t x0, int16_t x1, int16_t x2, int16_t x3) - { + static ASMJIT_INLINE Vec64 fromSW(int16_t x0) noexcept { Vec64 self; - self.setSw(x0, x1, x2, x3); + self.setSW(x0); + return self; + } + + //! Set all four 16-bit unsigned integers. + static ASMJIT_INLINE Vec64 fromUW(uint16_t x0) noexcept { + Vec64 self; + self.setUW(x0); return self; } //! Set all four 16-bit signed integers. - static ASMJIT_INLINE Vec64 fromSw( - int16_t x0) - { + static ASMJIT_INLINE Vec64 fromSW(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept { Vec64 self; - self.setSw(x0); + self.setSW(x0, x1, x2, x3); return self; } //! Set all four 16-bit unsigned integers. - static ASMJIT_INLINE Vec64 fromUw( - uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) - { + static ASMJIT_INLINE Vec64 fromUW(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept { Vec64 self; - self.setUw(x0, x1, x2, x3); - return self; - } - - //! Set all four 16-bit unsigned integers. - static ASMJIT_INLINE Vec64 fromUw( - uint16_t x0) - { - Vec64 self; - self.setUw(x0); + self.setUW(x0, x1, x2, x3); return self; } //! Set all two 32-bit signed integers. - static ASMJIT_INLINE Vec64 fromSd( - int32_t x0, int32_t x1) - { + static ASMJIT_INLINE Vec64 fromSD(int32_t x0) noexcept { Vec64 self; - self.setSd(x0, x1); + self.setSD(x0); + return self; + } + + //! Set all two 32-bit unsigned integers. + static ASMJIT_INLINE Vec64 fromUD(uint32_t x0) noexcept { + Vec64 self; + self.setUD(x0); return self; } //! Set all two 32-bit signed integers. - static ASMJIT_INLINE Vec64 fromSd( - int32_t x0) - { + static ASMJIT_INLINE Vec64 fromSD(int32_t x0, int32_t x1) noexcept { Vec64 self; - self.setSd(x0); + self.setSD(x0, x1); return self; } //! Set all two 32-bit unsigned integers. 
- static ASMJIT_INLINE Vec64 fromUd( - uint32_t x0, uint32_t x1) - { + static ASMJIT_INLINE Vec64 fromUD(uint32_t x0, uint32_t x1) noexcept { Vec64 self; - self.setUd(x0, x1); - return self; - } - - //! Set all two 32-bit unsigned integers. - static ASMJIT_INLINE Vec64 fromUd( - uint32_t x0) - { - Vec64 self; - self.setUd(x0); + self.setUD(x0, x1); return self; } //! Set 64-bit signed integer. - static ASMJIT_INLINE Vec64 fromSq( - int64_t x0) - { + static ASMJIT_INLINE Vec64 fromSQ(int64_t x0) noexcept { Vec64 self; - self.setSq(x0); + self.setSQ(x0); return self; } //! Set 64-bit unsigned integer. - static ASMJIT_INLINE Vec64 fromUq( - uint64_t x0) - { + static ASMJIT_INLINE Vec64 fromUQ(uint64_t x0) noexcept { Vec64 self; - self.setUq(x0); + self.setUQ(x0); return self; } //! Set all two SP-FP values. - static ASMJIT_INLINE Vec64 fromSf( - float x0, float x1) - { + static ASMJIT_INLINE Vec64 fromSF(float x0) noexcept { Vec64 self; - self.setSf(x0, x1); + self.setSF(x0); return self; } //! Set all two SP-FP values. - static ASMJIT_INLINE Vec64 fromSf( - float x0) - { + static ASMJIT_INLINE Vec64 fromSF(float x0, float x1) noexcept { Vec64 self; - self.setSf(x0); + self.setSF(x0, x1); return self; } //! Set all two SP-FP values. - static ASMJIT_INLINE Vec64 fromDf( - double x0) - { + static ASMJIT_INLINE Vec64 fromDF(double x0) noexcept { Vec64 self; - self.setDf(x0); + self.setDF(x0); return self; } @@ -187,139 +157,109 @@ union Vec64 { // -------------------------------------------------------------------------- //! Set all eight 8-bit signed integers. - ASMJIT_INLINE void setSb( - int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) - { + ASMJIT_INLINE void setSB(int8_t x0) noexcept { + setUB(static_cast(x0)); + } + + //! Set all eight 8-bit unsigned integers. 
+ ASMJIT_INLINE void setUB(uint8_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { + uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0101010101010101); + uq[0] = xq; + } + else { + uint32_t xd = static_cast(x0) * static_cast(0x01010101U); + ud[0] = xd; + ud[1] = xd; + } + } + + //! Set all eight 8-bit signed integers. + ASMJIT_INLINE void setSB( + int8_t x0, int8_t x1, int8_t x2, int8_t x3, int8_t x4, int8_t x5, int8_t x6, int8_t x7) noexcept { + sb[0] = x0; sb[1] = x1; sb[2] = x2; sb[3] = x3; sb[4] = x4; sb[5] = x5; sb[6] = x6; sb[7] = x7; } - //! Set all eight 8-bit signed integers. - ASMJIT_INLINE void setSb( - int8_t x0) - { - setUb(static_cast(x0)); - } - //! Set all eight 8-bit unsigned integers. - ASMJIT_INLINE void setUb( - uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) - { + ASMJIT_INLINE void setUB( + uint8_t x0, uint8_t x1, uint8_t x2, uint8_t x3, uint8_t x4, uint8_t x5, uint8_t x6, uint8_t x7) noexcept { + ub[0] = x0; ub[1] = x1; ub[2] = x2; ub[3] = x3; ub[4] = x4; ub[5] = x5; ub[6] = x6; ub[7] = x7; } - //! Set all eight 8-bit unsigned integers. - ASMJIT_INLINE void setUb( - uint8_t x0) - { - if (kArchHost64Bit) { - uint64_t t = static_cast(x0) * ASMJIT_UINT64_C(0x0101010101010101); - uq[0] = t; + //! Set all four 16-bit signed integers. + ASMJIT_INLINE void setSW(int16_t x0) noexcept { + setUW(static_cast(x0)); + } + + //! Set all four 16-bit unsigned integers. + ASMJIT_INLINE void setUW(uint16_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { + uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0001000100010001); + uq[0] = xq; } else { - uint32_t t = static_cast(x0) * static_cast(0x01010101U); - ud[0] = t; - ud[1] = t; + uint32_t xd = static_cast(x0) * static_cast(0x00010001U); + ud[0] = xd; + ud[1] = xd; } } //! Set all four 16-bit signed integers. 
- ASMJIT_INLINE void setSw( - int16_t x0, int16_t x1, int16_t x2, int16_t x3) - { + ASMJIT_INLINE void setSW(int16_t x0, int16_t x1, int16_t x2, int16_t x3) noexcept { sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3; } - //! Set all four 16-bit signed integers. - ASMJIT_INLINE void setSw( - int16_t x0) - { - setUw(static_cast(x0)); - } - //! Set all four 16-bit unsigned integers. - ASMJIT_INLINE void setUw( - uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) - { + ASMJIT_INLINE void setUW(uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3) noexcept { uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3; } - //! Set all four 16-bit unsigned integers. - ASMJIT_INLINE void setUw( - uint16_t x0) - { - if (kArchHost64Bit) { - uint64_t t = static_cast(x0) * ASMJIT_UINT64_C(0x0001000100010001); - uq[0] = t; - } - else { - uint32_t t = static_cast(x0) * static_cast(0x00010001U); - ud[0] = t; - ud[1] = t; - } - } - //! Set all two 32-bit signed integers. - ASMJIT_INLINE void setSd( - int32_t x0, int32_t x1) - { - sd[0] = x0; sd[1] = x1; - } - - //! Set all two 32-bit signed integers. - ASMJIT_INLINE void setSd( - int32_t x0) - { + ASMJIT_INLINE void setSD(int32_t x0) noexcept { sd[0] = x0; sd[1] = x0; } //! Set all two 32-bit unsigned integers. - ASMJIT_INLINE void setUd( - uint32_t x0, uint32_t x1) - { - ud[0] = x0; ud[1] = x1; - } - - //! Set all two 32-bit unsigned integers. - ASMJIT_INLINE void setUd( - uint32_t x0) - { + ASMJIT_INLINE void setUD(uint32_t x0) noexcept { ud[0] = x0; ud[1] = x0; } + //! Set all two 32-bit signed integers. + ASMJIT_INLINE void setSD(int32_t x0, int32_t x1) noexcept { + sd[0] = x0; sd[1] = x1; + } + + //! Set all two 32-bit unsigned integers. + ASMJIT_INLINE void setUD(uint32_t x0, uint32_t x1) noexcept { + ud[0] = x0; ud[1] = x1; + } + //! Set 64-bit signed integer. - ASMJIT_INLINE void setSq( - int64_t x0) - { + ASMJIT_INLINE void setSQ(int64_t x0) noexcept { sq[0] = x0; } //! Set 64-bit unsigned integer. 
- ASMJIT_INLINE void setUq( - uint64_t x0) - { + ASMJIT_INLINE void setUQ(uint64_t x0) noexcept { uq[0] = x0; } //! Set all two SP-FP values. - ASMJIT_INLINE void setSf( - float x0, float x1) - { - sf[0] = x0; sf[1] = x1; - } - - //! Set all two SP-FP values. - ASMJIT_INLINE void setSf( - float x0) - { + ASMJIT_INLINE void setSF(float x0) noexcept { sf[0] = x0; sf[1] = x0; } //! Set all two SP-FP values. - ASMJIT_INLINE void setDf( - double x0) - { + ASMJIT_INLINE void setSF(float x0, float x1) noexcept { + sf[0] = x0; sf[1] = x1; + } + + //! Set all two SP-FP values. + ASMJIT_INLINE void setDF(double x0) noexcept { df[0] = x0; } @@ -361,188 +301,156 @@ union Vec128 { // -------------------------------------------------------------------------- //! Set all sixteen 8-bit signed integers. - static ASMJIT_INLINE Vec128 fromSb( - int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , - int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , - int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, - int8_t x12, int8_t x13, int8_t x14, int8_t x15) - { + static ASMJIT_INLINE Vec128 fromSB(int8_t x0) noexcept { Vec128 self; - self.setSb(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); + self.setSB(x0); + return self; + } + + //! Set all sixteen 8-bit unsigned integers. + static ASMJIT_INLINE Vec128 fromUB(uint8_t x0) noexcept { + Vec128 self; + self.setUB(x0); return self; } //! Set all sixteen 8-bit signed integers. - static ASMJIT_INLINE Vec128 fromSb( - int8_t x0) - { + static ASMJIT_INLINE Vec128 fromSB( + int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , + int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , + int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, + int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept { + Vec128 self; - self.setSb(x0); + self.setSB(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); return self; } //! Set all sixteen 8-bit unsigned integers. 
- static ASMJIT_INLINE Vec128 fromUb( + static ASMJIT_INLINE Vec128 fromUB( uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, - uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) - { - Vec128 self; - self.setUb(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); - return self; - } + uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept { - //! Set all sixteen 8-bit unsigned integers. - static ASMJIT_INLINE Vec128 fromUb( - uint8_t x0) - { Vec128 self; - self.setUb(x0); + self.setUB(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); return self; } //! Set all eight 16-bit signed integers. - static ASMJIT_INLINE Vec128 fromSw( - int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) - { + static ASMJIT_INLINE Vec128 fromSW(int16_t x0) noexcept { Vec128 self; - self.setSw(x0, x1, x2, x3, x4, x5, x6, x7); + self.setSW(x0); + return self; + } + + //! Set all eight 16-bit unsigned integers. + static ASMJIT_INLINE Vec128 fromUW(uint16_t x0) noexcept { + Vec128 self; + self.setUW(x0); return self; } //! Set all eight 16-bit signed integers. - static ASMJIT_INLINE Vec128 fromSw( - int16_t x0) - { + static ASMJIT_INLINE Vec128 fromSW( + int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept { + Vec128 self; - self.setSw(x0); + self.setSW(x0, x1, x2, x3, x4, x5, x6, x7); return self; } //! Set all eight 16-bit unsigned integers. - static ASMJIT_INLINE Vec128 fromUw( - uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) - { - Vec128 self; - self.setUw(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } + static ASMJIT_INLINE Vec128 fromUW( + uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept { - //! 
Set all eight 16-bit unsigned integers. - static ASMJIT_INLINE Vec128 fromUw( - uint16_t x0) - { Vec128 self; - self.setUw(x0); + self.setUW(x0, x1, x2, x3, x4, x5, x6, x7); return self; } //! Set all four 32-bit signed integers. - static ASMJIT_INLINE Vec128 fromSd( - int32_t x0, int32_t x1, int32_t x2, int32_t x3) - { + static ASMJIT_INLINE Vec128 fromSD(int32_t x0) noexcept { Vec128 self; - self.setSd(x0, x1, x2, x3); + self.setSD(x0); + return self; + } + + //! Set all four 32-bit unsigned integers. + static ASMJIT_INLINE Vec128 fromUD(uint32_t x0) noexcept { + Vec128 self; + self.setUD(x0); return self; } //! Set all four 32-bit signed integers. - static ASMJIT_INLINE Vec128 fromSd( - int32_t x0) - { + static ASMJIT_INLINE Vec128 fromSD(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept { Vec128 self; - self.setSd(x0); + self.setSD(x0, x1, x2, x3); return self; } //! Set all four 32-bit unsigned integers. - static ASMJIT_INLINE Vec128 fromUd( - uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) - { + static ASMJIT_INLINE Vec128 fromUD(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { Vec128 self; - self.setUd(x0, x1, x2, x3); - return self; - } - - //! Set all four 32-bit unsigned integers. - static ASMJIT_INLINE Vec128 fromUd( - uint32_t x0) - { - Vec128 self; - self.setUd(x0); + self.setUD(x0, x1, x2, x3); return self; } //! Set all two 64-bit signed integers. - static ASMJIT_INLINE Vec128 fromSq( - int64_t x0, int64_t x1) - { + static ASMJIT_INLINE Vec128 fromSQ(int64_t x0) noexcept { Vec128 self; - self.setSq(x0, x1); + self.setSQ(x0); + return self; + } + + //! Set all two 64-bit unsigned integers. + static ASMJIT_INLINE Vec128 fromUQ(uint64_t x0) noexcept { + Vec128 self; + self.setUQ(x0); return self; } //! Set all two 64-bit signed integers. 
- static ASMJIT_INLINE Vec128 fromSq( - int64_t x0) - { + static ASMJIT_INLINE Vec128 fromSQ(int64_t x0, int64_t x1) noexcept { Vec128 self; - self.setSq(x0); + self.setSQ(x0, x1); return self; } //! Set all two 64-bit unsigned integers. - static ASMJIT_INLINE Vec128 fromUq( - uint64_t x0, uint64_t x1) - { + static ASMJIT_INLINE Vec128 fromUQ(uint64_t x0, uint64_t x1) noexcept { Vec128 self; - self.setUq(x0, x1); - return self; - } - - //! Set all two 64-bit unsigned integers. - static ASMJIT_INLINE Vec128 fromUq( - uint64_t x0) - { - Vec128 self; - self.setUq(x0); + self.setUQ(x0, x1); return self; } //! Set all four SP-FP floats. - static ASMJIT_INLINE Vec128 fromSf( - float x0, float x1, float x2, float x3) - { + static ASMJIT_INLINE Vec128 fromSF(float x0) noexcept { Vec128 self; - self.setSf(x0, x1, x2, x3); + self.setSF(x0); return self; } //! Set all four SP-FP floats. - static ASMJIT_INLINE Vec128 fromSf( - float x0) - { + static ASMJIT_INLINE Vec128 fromSF(float x0, float x1, float x2, float x3) noexcept { Vec128 self; - self.setSf(x0); + self.setSF(x0, x1, x2, x3); return self; } //! Set all two DP-FP floats. - static ASMJIT_INLINE Vec128 fromDf( - double x0, double x1) - { + static ASMJIT_INLINE Vec128 fromDF(double x0) noexcept { Vec128 self; - self.setDf(x0, x1); + self.setDF(x0); return self; } //! Set all two DP-FP floats. - static ASMJIT_INLINE Vec128 fromDf( - double x0) - { + static ASMJIT_INLINE Vec128 fromDF(double x0, double x1) noexcept { Vec128 self; - self.setDf(x0); + self.setDF(x0, x1); return self; } @@ -551,123 +459,97 @@ union Vec128 { // -------------------------------------------------------------------------- //! Set all sixteen 8-bit signed integers. - ASMJIT_INLINE void setSb( + ASMJIT_INLINE void setSB(int8_t x0) noexcept { + setUB(static_cast(x0)); + } + + //! Set all sixteen 8-bit unsigned integers. 
+ ASMJIT_INLINE void setUB(uint8_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { + uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0101010101010101); + uq[0] = xq; + uq[1] = xq; + } + else { + uint32_t xd = static_cast(x0) * static_cast(0x01010101U); + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; + } + } + + //! Set all sixteen 8-bit signed integers. + ASMJIT_INLINE void setSB( int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, - int8_t x12, int8_t x13, int8_t x14, int8_t x15) - { + int8_t x12, int8_t x13, int8_t x14, int8_t x15) noexcept { + sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ; sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ; sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11; sb[12] = x12; sb[13] = x13; sb[14] = x14; sb[15] = x15; } - //! Set all sixteen 8-bit signed integers. - ASMJIT_INLINE void setSb( - int8_t x0) - { - setUb(static_cast(x0)); - } - //! Set all sixteen 8-bit unsigned integers. - ASMJIT_INLINE void setUb( + ASMJIT_INLINE void setUB( uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, - uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) - { + uint8_t x12, uint8_t x13, uint8_t x14, uint8_t x15) noexcept { + ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ; ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ; ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11; ub[12] = x12; ub[13] = x13; ub[14] = x14; ub[15] = x15; } - //! Set all sixteen 8-bit unsigned integers. - ASMJIT_INLINE void setUb( - uint8_t x0) - { - if (kArchHost64Bit) { - uint64_t t = static_cast(x0) * ASMJIT_UINT64_C(0x0101010101010101); - uq[0] = t; - uq[1] = t; + //! Set all eight 16-bit signed integers. + ASMJIT_INLINE void setSW(int16_t x0) noexcept { + setUW(static_cast(x0)); + } + + //! Set all eight 16-bit unsigned integers. 
+ ASMJIT_INLINE void setUW(uint16_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { + uint64_t xq = static_cast(x0) * ASMJIT_UINT64_C(0x0001000100010001); + uq[0] = xq; + uq[1] = xq; } else { - uint32_t t = static_cast(x0) * static_cast(0x01010101U); - ud[0] = t; - ud[1] = t; - ud[2] = t; - ud[3] = t; + uint32_t xd = static_cast(x0) * static_cast(0x00010001U); + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; } } //! Set all eight 16-bit signed integers. - ASMJIT_INLINE void setSw( - int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) - { + ASMJIT_INLINE void setSW( + int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7) noexcept { + sw[0] = x0; sw[1] = x1; sw[2] = x2; sw[3] = x3; sw[4] = x4; sw[5] = x5; sw[6] = x6; sw[7] = x7; } - //! Set all eight 16-bit signed integers. - ASMJIT_INLINE void setSw( - int16_t x0) - { - setUw(static_cast(x0)); - } - //! Set all eight 16-bit unsigned integers. - ASMJIT_INLINE void setUw( - uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) - { + ASMJIT_INLINE void setUW( + uint16_t x0, uint16_t x1, uint16_t x2, uint16_t x3, uint16_t x4, uint16_t x5, uint16_t x6, uint16_t x7) noexcept { + uw[0] = x0; uw[1] = x1; uw[2] = x2; uw[3] = x3; uw[4] = x4; uw[5] = x5; uw[6] = x6; uw[7] = x7; } - //! Set all eight 16-bit unsigned integers. - ASMJIT_INLINE void setUw( - uint16_t x0) - { - if (kArchHost64Bit) { - uint64_t t = static_cast(x0) * ASMJIT_UINT64_C(0x0001000100010001); - uq[0] = t; - uq[1] = t; - } - else { - uint32_t t = static_cast(x0) * static_cast(0x00010001U); - ud[0] = t; - ud[1] = t; - ud[2] = t; - ud[3] = t; - } - } - //! Set all four 32-bit signed integers. - ASMJIT_INLINE void setSd( - int32_t x0, int32_t x1, int32_t x2, int32_t x3) - { - sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; - } - - //! Set all four 32-bit signed integers. 
- ASMJIT_INLINE void setSd( - int32_t x0) - { - setUd(static_cast(x0)); + ASMJIT_INLINE void setSD(int32_t x0) noexcept { + setUD(static_cast(x0)); } //! Set all four 32-bit unsigned integers. - ASMJIT_INLINE void setUd( - uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) - { - ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; - } - - //! Set all four 32-bit unsigned integers. - ASMJIT_INLINE void setUd( - uint32_t x0) - { - if (kArchHost64Bit) { + ASMJIT_INLINE void setUD(uint32_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { uint64_t t = (static_cast(x0) << 32) + x0; uq[0] = t; uq[1] = t; @@ -680,62 +562,56 @@ union Vec128 { } } - //! Set all two 64-bit signed integers. - ASMJIT_INLINE void setSq( - int64_t x0, int64_t x1) - { - sq[0] = x0; sq[1] = x1; + //! Set all four 32-bit signed integers. + ASMJIT_INLINE void setSD(int32_t x0, int32_t x1, int32_t x2, int32_t x3) noexcept { + sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; + } + + //! Set all four 32-bit unsigned integers. + ASMJIT_INLINE void setUD(uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3) noexcept { + ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; } //! Set all two 64-bit signed integers. - ASMJIT_INLINE void setSq( - int64_t x0) - { + ASMJIT_INLINE void setSQ(int64_t x0) noexcept { sq[0] = x0; sq[1] = x0; } //! Set all two 64-bit unsigned integers. - ASMJIT_INLINE void setUq( - uint64_t x0, uint64_t x1) - { - uq[0] = x0; uq[1] = x1; - } - - //! Set all two 64-bit unsigned integers. - ASMJIT_INLINE void setUq( - uint64_t x0) - { + ASMJIT_INLINE void setUQ(uint64_t x0) noexcept { uq[0] = x0; uq[1] = x0; } - //! Set all four SP-FP floats. - ASMJIT_INLINE void setSf( - float x0, float x1, float x2, float x3) - { - sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; + //! Set all two 64-bit signed integers. + ASMJIT_INLINE void setSQ(int64_t x0, int64_t x1) noexcept { + sq[0] = x0; sq[1] = x1; + } + + //! Set all two 64-bit unsigned integers. 
+ ASMJIT_INLINE void setUQ(uint64_t x0, uint64_t x1) noexcept { + uq[0] = x0; uq[1] = x1; } //! Set all four SP-FP floats. - ASMJIT_INLINE void setSf( - float x0) - { + ASMJIT_INLINE void setSF(float x0) noexcept { sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0; } - //! Set all two DP-FP floats. - ASMJIT_INLINE void setDf( - double x0, double x1) - { - df[0] = x0; df[1] = x1; + //! Set all four SP-FP floats. + ASMJIT_INLINE void setSF(float x0, float x1, float x2, float x3) noexcept { + sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; } //! Set all two DP-FP floats. - ASMJIT_INLINE void setDf( - double x0) - { + ASMJIT_INLINE void setDF(double x0) noexcept { df[0] = x0; df[1] = x0; } + //! Set all two DP-FP floats. + ASMJIT_INLINE void setDF(double x0, double x1) noexcept { + df[0] = x0; df[1] = x1; + } + // -------------------------------------------------------------------------- // [Members] // -------------------------------------------------------------------------- @@ -774,7 +650,21 @@ union Vec256 { // -------------------------------------------------------------------------- //! Set all thirty two 8-bit signed integers. - static ASMJIT_INLINE Vec256 fromSb( + static ASMJIT_INLINE Vec256 fromSB(int8_t x0) noexcept { + Vec256 self; + self.setSB(x0); + return self; + } + + //! Set all thirty two 8-bit unsigned integers. + static ASMJIT_INLINE Vec256 fromUB(uint8_t x0) noexcept { + Vec256 self; + self.setUB(x0); + return self; + } + + //! Set all thirty two 8-bit signed integers. 
+ static ASMJIT_INLINE Vec256 fromSB( int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, @@ -782,26 +672,17 @@ union Vec256 { int8_t x16, int8_t x17, int8_t x18, int8_t x19, int8_t x20, int8_t x21, int8_t x22, int8_t x23, int8_t x24, int8_t x25, int8_t x26, int8_t x27, - int8_t x28, int8_t x29, int8_t x30, int8_t x31) - { + int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept { + Vec256 self; - self.setSb( + self.setSB( x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31); return self; } - //! Set all thirty two 8-bit signed integers. - static ASMJIT_INLINE Vec256 fromSb( - int8_t x0) - { - Vec256 self; - self.setSb(x0); - return self; - } - //! Set all thirty two 8-bit unsigned integers. - static ASMJIT_INLINE Vec256 fromUb( + static ASMJIT_INLINE Vec256 fromUB( uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, @@ -809,170 +690,139 @@ union Vec256 { uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19, uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23, uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27, - uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) - { + uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept { + Vec256 self; - self.setUb( + self.setUB( x0, x1 , x2 , x3 , x4 , x5 , x6 , x7 , x8 , x9 , x10, x11, x12, x13, x14, x15, x16, x17, x18, x19, x20, x21, x22, x23, x24, x25, x26, x27, x28, x29, x30, x31); return self; } - //! Set all thirty two 8-bit unsigned integers. - static ASMJIT_INLINE Vec256 fromUb( - uint8_t x0) - { + //! Set all sixteen 16-bit signed integers. + static ASMJIT_INLINE Vec256 fromSW(int16_t x0) noexcept { Vec256 self; - self.setUb(x0); + self.setSW(x0); + return self; + } + + //! Set all sixteen 16-bit unsigned integers. 
+ static ASMJIT_INLINE Vec256 fromUW(uint16_t x0) noexcept { + Vec256 self; + self.setUW(x0); return self; } //! Set all sixteen 16-bit signed integers. - static ASMJIT_INLINE Vec256 fromSw( + static ASMJIT_INLINE Vec256 fromSW( int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7 , - int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) - { - Vec256 self; - self.setSw(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); - return self; - } + int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept { - //! Set all sixteen 16-bit signed integers. - static ASMJIT_INLINE Vec256 fromSw( - int16_t x0) - { Vec256 self; - self.setSw(x0); + self.setSW(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); return self; } //! Set all sixteen 16-bit unsigned integers. - static ASMJIT_INLINE Vec256 fromUw( + static ASMJIT_INLINE Vec256 fromUW( uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7 , - uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) - { - Vec256 self; - self.setUw(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); - return self; - } + uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept { - //! Set all sixteen 16-bit unsigned integers. - static ASMJIT_INLINE Vec256 fromUw( - uint16_t x0) - { Vec256 self; - self.setUw(x0); + self.setUW(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15); return self; } //! Set all eight 32-bit signed integers. - static ASMJIT_INLINE Vec256 fromSd( + static ASMJIT_INLINE Vec256 fromSD(int32_t x0) noexcept { + Vec256 self; + self.setSD(x0); + return self; + } + + //! Set all eight 32-bit unsigned integers. 
+ static ASMJIT_INLINE Vec256 fromUD(uint32_t x0) noexcept { + Vec256 self; + self.setUD(x0); + return self; + } + + //! Set all eight 32-bit signed integers. + static ASMJIT_INLINE Vec256 fromSD( int32_t x0, int32_t x1, int32_t x2, int32_t x3, - int32_t x4, int32_t x5, int32_t x6, int32_t x7) - { - Vec256 self; - self.setSd(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } + int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept { - //! Set all eight 32-bit signed integers. - static ASMJIT_INLINE Vec256 fromSd( - int32_t x0) - { Vec256 self; - self.setSd(x0); + self.setSD(x0, x1, x2, x3, x4, x5, x6, x7); return self; } //! Set all eight 32-bit unsigned integers. - static ASMJIT_INLINE Vec256 fromUd( + static ASMJIT_INLINE Vec256 fromUD( uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, - uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) - { - Vec256 self; - self.setUd(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } + uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { - //! Set all eight 32-bit unsigned integers. - static ASMJIT_INLINE Vec256 fromUd( - uint32_t x0) - { Vec256 self; - self.setUd(x0); + self.setUD(x0, x1, x2, x3, x4, x5, x6, x7); return self; } //! Set all four 64-bit signed integers. - static ASMJIT_INLINE Vec256 fromSq( - int64_t x0, int64_t x1, int64_t x2, int64_t x3) - { + static ASMJIT_INLINE Vec256 fromSQ(int64_t x0) noexcept { Vec256 self; - self.setSq(x0, x1, x2, x3); + self.setSQ(x0); + return self; + } + + //! Set all four 64-bit unsigned integers. + static ASMJIT_INLINE Vec256 fromUQ(uint64_t x0) noexcept { + Vec256 self; + self.setUQ(x0); return self; } //! Set all four 64-bit signed integers. - static ASMJIT_INLINE Vec256 fromSq( - int64_t x0) - { + static ASMJIT_INLINE Vec256 fromSQ(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept { Vec256 self; - self.setSq(x0); + self.setSQ(x0, x1, x2, x3); return self; } //! Set all four 64-bit unsigned integers. 
- static ASMJIT_INLINE Vec256 fromUq( - uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) - { + static ASMJIT_INLINE Vec256 fromUQ(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept { Vec256 self; - self.setUq(x0, x1, x2, x3); - return self; - } - - //! Set all four 64-bit unsigned integers. - static ASMJIT_INLINE Vec256 fromUq( - uint64_t x0) - { - Vec256 self; - self.setUq(x0); + self.setUQ(x0, x1, x2, x3); return self; } //! Set all eight SP-FP floats. - static ASMJIT_INLINE Vec256 fromSf( + static ASMJIT_INLINE Vec256 fromSF(float x0) noexcept { + Vec256 self; + self.setSF(x0); + return self; + } + + //! Set all eight SP-FP floats. + static ASMJIT_INLINE Vec256 fromSF( float x0, float x1, float x2, float x3, - float x4, float x5, float x6, float x7) - { - Vec256 self; - self.setSf(x0, x1, x2, x3, x4, x5, x6, x7); - return self; - } + float x4, float x5, float x6, float x7) noexcept { - //! Set all eight SP-FP floats. - static ASMJIT_INLINE Vec256 fromSf( - float x0) - { Vec256 self; - self.setSf(x0); + self.setSF(x0, x1, x2, x3, x4, x5, x6, x7); return self; } //! Set all four DP-FP floats. - static ASMJIT_INLINE Vec256 fromDf( - double x0, double x1, double x2, double x3) - { + static ASMJIT_INLINE Vec256 fromDF(double x0) noexcept { Vec256 self; - self.setDf(x0, x1, x2, x3); + self.setDF(x0); return self; } //! Set all four DP-FP floats. - static ASMJIT_INLINE Vec256 fromDf( - double x0) - { + static ASMJIT_INLINE Vec256 fromDF(double x0, double x1, double x2, double x3) noexcept { Vec256 self; - self.setDf(x0); + self.setDF(x0, x1, x2, x3); return self; } @@ -981,7 +831,34 @@ union Vec256 { // -------------------------------------------------------------------------- //! Set all thirty two 8-bit signed integers. - ASMJIT_INLINE void setSb( + ASMJIT_INLINE void setSB(int8_t x0) noexcept { + setUB(static_cast(x0)); + } + + //! Set all thirty two 8-bit unsigned integers. 
+ ASMJIT_INLINE void setUB(uint8_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { + uint64_t xq = static_cast(x0)* ASMJIT_UINT64_C(0x0101010101010101); + uq[0] = xq; + uq[1] = xq; + uq[2] = xq; + uq[3] = xq; + } + else { + uint32_t xd = static_cast(x0)* static_cast(0x01010101U); + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; + ud[4] = xd; + ud[5] = xd; + ud[6] = xd; + ud[7] = xd; + } + } + + //! Set all thirty two 8-bit signed integers. + ASMJIT_INLINE void setSB( int8_t x0 , int8_t x1 , int8_t x2 , int8_t x3 , int8_t x4 , int8_t x5 , int8_t x6 , int8_t x7 , int8_t x8 , int8_t x9 , int8_t x10, int8_t x11, @@ -989,8 +866,8 @@ union Vec256 { int8_t x16, int8_t x17, int8_t x18, int8_t x19, int8_t x20, int8_t x21, int8_t x22, int8_t x23, int8_t x24, int8_t x25, int8_t x26, int8_t x27, - int8_t x28, int8_t x29, int8_t x30, int8_t x31) - { + int8_t x28, int8_t x29, int8_t x30, int8_t x31) noexcept { + sb[0 ] = x0 ; sb[1 ] = x1 ; sb[2 ] = x2 ; sb[3 ] = x3 ; sb[4 ] = x4 ; sb[5 ] = x5 ; sb[6 ] = x6 ; sb[7 ] = x7 ; sb[8 ] = x8 ; sb[9 ] = x9 ; sb[10] = x10; sb[11] = x11; @@ -1001,15 +878,8 @@ union Vec256 { sb[28] = x28; sb[29] = x29; sb[30] = x30; sb[31] = x31; } - //! Set all thirty two 8-bit signed integers. - ASMJIT_INLINE void setSb( - int8_t x0) - { - setUb(static_cast(x0)); - } - //! Set all thirty two 8-bit unsigned integers. 
- ASMJIT_INLINE void setUb( + ASMJIT_INLINE void setUB( uint8_t x0 , uint8_t x1 , uint8_t x2 , uint8_t x3 , uint8_t x4 , uint8_t x5 , uint8_t x6 , uint8_t x7 , uint8_t x8 , uint8_t x9 , uint8_t x10, uint8_t x11, @@ -1017,8 +887,8 @@ union Vec256 { uint8_t x16, uint8_t x17, uint8_t x18, uint8_t x19, uint8_t x20, uint8_t x21, uint8_t x22, uint8_t x23, uint8_t x24, uint8_t x25, uint8_t x26, uint8_t x27, - uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) - { + uint8_t x28, uint8_t x29, uint8_t x30, uint8_t x31) noexcept { + ub[0 ] = x0 ; ub[1 ] = x1 ; ub[2 ] = x2 ; ub[3 ] = x3 ; ub[4 ] = x4 ; ub[5 ] = x5 ; ub[6 ] = x6 ; ub[7 ] = x7 ; ub[8 ] = x8 ; ub[9 ] = x9 ; ub[10] = x10; ub[11] = x11; @@ -1029,118 +899,68 @@ union Vec256 { ub[28] = x28; ub[29] = x29; ub[30] = x30; ub[31] = x31; } - //! Set all thirty two 8-bit unsigned integers. - ASMJIT_INLINE void setUb( - uint8_t x0) - { - if (kArchHost64Bit) { - uint64_t t = static_cast(x0)* ASMJIT_UINT64_C(0x0101010101010101); - uq[0] = t; - uq[1] = t; - uq[2] = t; - uq[3] = t; + //! Set all sixteen 16-bit signed integers. + ASMJIT_INLINE void setSW(int16_t x0) noexcept { + setUW(static_cast(x0)); + } + + //! Set all eight 16-bit unsigned integers. + ASMJIT_INLINE void setUW(uint16_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { + uint64_t xq = static_cast(x0)* ASMJIT_UINT64_C(0x0001000100010001); + uq[0] = xq; + uq[1] = xq; + uq[2] = xq; + uq[3] = xq; } else { - uint32_t t = static_cast(x0)* static_cast(0x01010101U); - ud[0] = t; - ud[1] = t; - ud[2] = t; - ud[3] = t; - ud[4] = t; - ud[5] = t; - ud[6] = t; - ud[7] = t; + uint32_t xd = static_cast(x0)* static_cast(0x00010001U); + ud[0] = xd; + ud[1] = xd; + ud[2] = xd; + ud[3] = xd; + ud[4] = xd; + ud[5] = xd; + ud[6] = xd; + ud[7] = xd; } } //! Set all sixteen 16-bit signed integers. 
- ASMJIT_INLINE void setSw( - int16_t x0, int16_t x1, int16_t x2, int16_t x3, int16_t x4, int16_t x5, int16_t x6, int16_t x7, - int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) - { + ASMJIT_INLINE void setSW( + int16_t x0, int16_t x1, int16_t x2 , int16_t x3 , int16_t x4 , int16_t x5 , int16_t x6 , int16_t x7, + int16_t x8, int16_t x9, int16_t x10, int16_t x11, int16_t x12, int16_t x13, int16_t x14, int16_t x15) noexcept { + sw[0 ] = x0 ; sw[1 ] = x1 ; sw[2 ] = x2 ; sw[3 ] = x3 ; sw[4 ] = x4 ; sw[5 ] = x5 ; sw[6 ] = x6 ; sw[7 ] = x7 ; sw[8 ] = x8 ; sw[9 ] = x9 ; sw[10] = x10; sw[11] = x11; sw[12] = x12; sw[13] = x13; sw[14] = x14; sw[15] = x15; } - //! Set all sixteen 16-bit signed integers. - ASMJIT_INLINE void setSw( - int16_t x0) - { - setUw(static_cast(x0)); - } - //! Set all sixteen 16-bit unsigned integers. - ASMJIT_INLINE void setUw( - uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7 , - uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) - { + ASMJIT_INLINE void setUW( + uint16_t x0, uint16_t x1, uint16_t x2 , uint16_t x3 , uint16_t x4 , uint16_t x5 , uint16_t x6 , uint16_t x7, + uint16_t x8, uint16_t x9, uint16_t x10, uint16_t x11, uint16_t x12, uint16_t x13, uint16_t x14, uint16_t x15) noexcept { + uw[0 ] = x0 ; uw[1 ] = x1 ; uw[2 ] = x2 ; uw[3 ] = x3 ; uw[4 ] = x4 ; uw[5 ] = x5 ; uw[6 ] = x6 ; uw[7 ] = x7 ; uw[8 ] = x8 ; uw[9 ] = x9 ; uw[10] = x10; uw[11] = x11; uw[12] = x12; uw[13] = x13; uw[14] = x14; uw[15] = x15; } - //! Set all eight 16-bit unsigned integers. 
- ASMJIT_INLINE void setUw( - uint16_t x0) - { - if (kArchHost64Bit) { - uint64_t t = static_cast(x0)* ASMJIT_UINT64_C(0x0001000100010001); - uq[0] = t; - uq[1] = t; - uq[2] = t; - uq[3] = t; - } - else { - uint32_t t = static_cast(x0)* static_cast(0x00010001U); - ud[0] = t; - ud[1] = t; - ud[2] = t; - ud[3] = t; - ud[4] = t; - ud[5] = t; - ud[6] = t; - ud[7] = t; - } - } - //! Set all eight 32-bit signed integers. - ASMJIT_INLINE void setSd( - int32_t x0, int32_t x1, int32_t x2, int32_t x3, - int32_t x4, int32_t x5, int32_t x6, int32_t x7) - { - sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; - sd[4] = x4; sd[5] = x5; sd[6] = x6; sd[7] = x7; - } - - //! Set all eight 32-bit signed integers. - ASMJIT_INLINE void setSd( - int32_t x0) - { - setUd(static_cast(x0)); + ASMJIT_INLINE void setSD(int32_t x0) noexcept { + setUD(static_cast(x0)); } //! Set all eight 32-bit unsigned integers. - ASMJIT_INLINE void setUd( - uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, - uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) - { - ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; - ud[4] = x4; ud[5] = x5; ud[6] = x6; ud[7] = x7; - } - - //! Set all eight 32-bit unsigned integers. - ASMJIT_INLINE void setUd( - uint32_t x0) - { - if (kArchHost64Bit) { - uint64_t t = (static_cast(x0) << 32) + x0; - uq[0] = t; - uq[1] = t; - uq[2] = t; - uq[3] = t; + ASMJIT_INLINE void setUD(uint32_t x0) noexcept { + if (ASMJIT_ARCH_64BIT) { + uint64_t xq = (static_cast(x0) << 32) + x0; + uq[0] = xq; + uq[1] = xq; + uq[2] = xq; + uq[3] = xq; } else { ud[0] = x0; @@ -1154,65 +974,69 @@ union Vec256 { } } - //! Set all four 64-bit signed integers. - ASMJIT_INLINE void setSq( - int64_t x0, int64_t x1, int64_t x2, int64_t x3) - { - sq[0] = x0; sq[1] = x1; sq[2] = x2; sq[3] = x3; + //! Set all eight 32-bit signed integers. 
+ ASMJIT_INLINE void setSD( + int32_t x0, int32_t x1, int32_t x2, int32_t x3, + int32_t x4, int32_t x5, int32_t x6, int32_t x7) noexcept { + + sd[0] = x0; sd[1] = x1; sd[2] = x2; sd[3] = x3; + sd[4] = x4; sd[5] = x5; sd[6] = x6; sd[7] = x7; + } + + //! Set all eight 32-bit unsigned integers. + ASMJIT_INLINE void setUD( + uint32_t x0, uint32_t x1, uint32_t x2, uint32_t x3, + uint32_t x4, uint32_t x5, uint32_t x6, uint32_t x7) noexcept { + + ud[0] = x0; ud[1] = x1; ud[2] = x2; ud[3] = x3; + ud[4] = x4; ud[5] = x5; ud[6] = x6; ud[7] = x7; } //! Set all four 64-bit signed integers. - ASMJIT_INLINE void setSq( - int64_t x0) - { + ASMJIT_INLINE void setSQ(int64_t x0) noexcept { sq[0] = x0; sq[1] = x0; sq[2] = x0; sq[3] = x0; } //! Set all four 64-bit unsigned integers. - ASMJIT_INLINE void setUq( - uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) - { - uq[0] = x0; uq[1] = x1; uq[2] = x2; uq[3] = x3; - } - - //! Set all four 64-bit unsigned integers. - ASMJIT_INLINE void setUq( - uint64_t x0) - { + ASMJIT_INLINE void setUQ(uint64_t x0) noexcept { uq[0] = x0; uq[1] = x0; uq[2] = x0; uq[3] = x0; } - //! Set all eight SP-FP floats. - ASMJIT_INLINE void setSf( - float x0, float x1, float x2, float x3, - float x4, float x5, float x6, float x7) - { - sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; - sf[4] = x4; sf[5] = x5; sf[6] = x6; sf[7] = x7; + //! Set all four 64-bit signed integers. + ASMJIT_INLINE void setSQ(int64_t x0, int64_t x1, int64_t x2, int64_t x3) noexcept { + sq[0] = x0; sq[1] = x1; sq[2] = x2; sq[3] = x3; + } + + //! Set all four 64-bit unsigned integers. + ASMJIT_INLINE void setUQ(uint64_t x0, uint64_t x1, uint64_t x2, uint64_t x3) noexcept { + uq[0] = x0; uq[1] = x1; uq[2] = x2; uq[3] = x3; } //! Set all eight SP-FP floats. - ASMJIT_INLINE void setSf( - float x0) - { + ASMJIT_INLINE void setSF(float x0) noexcept { sf[0] = x0; sf[1] = x0; sf[2] = x0; sf[3] = x0; sf[4] = x0; sf[5] = x0; sf[6] = x0; sf[7] = x0; } - //! Set all four DP-FP floats. 
- ASMJIT_INLINE void setDf( - double x0, double x1, double x2, double x3) - { - df[0] = x0; df[1] = x1; df[2] = x2; df[3] = x3; + //! Set all eight SP-FP floats. + ASMJIT_INLINE void setSF( + float x0, float x1, float x2, float x3, + float x4, float x5, float x6, float x7) noexcept { + + sf[0] = x0; sf[1] = x1; sf[2] = x2; sf[3] = x3; + sf[4] = x4; sf[5] = x5; sf[6] = x6; sf[7] = x7; } //! Set all four DP-FP floats. - ASMJIT_INLINE void setDf( - double x0) - { + ASMJIT_INLINE void setDF(double x0) noexcept { df[0] = x0; df[1] = x0; df[2] = x0; df[3] = x0; } + //! Set all four DP-FP floats. + ASMJIT_INLINE void setDF(double x0, double x1, double x2, double x3) noexcept { + df[0] = x0; df[1] = x1; df[2] = x2; df[3] = x3; + } + // -------------------------------------------------------------------------- // [Members] // -------------------------------------------------------------------------- diff --git a/src/asmjit/base/vmem.cpp b/src/asmjit/base/vmem.cpp index a27fc0d..29cbe5b 100644 --- a/src/asmjit/base/vmem.cpp +++ b/src/asmjit/base/vmem.cpp @@ -71,7 +71,7 @@ namespace asmjit { struct VMemLocal { // AsmJit allows to pass a `nullptr` handle to `VMemUtil`. This function is // just a convenient way to convert such handle to the current process one. - ASMJIT_INLINE HANDLE getSafeProcessHandle(HANDLE hParam) const { + ASMJIT_INLINE HANDLE getSafeProcessHandle(HANDLE hParam) const noexcept { return hParam != nullptr ? 
hParam : hProcess; } @@ -81,7 +81,7 @@ struct VMemLocal { }; static VMemLocal vMemLocal; -static const VMemLocal& vMemGet() { +static const VMemLocal& vMemGet() noexcept { VMemLocal& vMem = vMemLocal; if (!vMem.hProcess) { @@ -97,21 +97,21 @@ static const VMemLocal& vMemGet() { return vMem; }; -size_t VMemUtil::getPageSize() { +size_t VMemUtil::getPageSize() noexcept { const VMemLocal& vMem = vMemGet(); return vMem.pageSize; } -size_t VMemUtil::getPageGranularity() { +size_t VMemUtil::getPageGranularity() noexcept { const VMemLocal& vMem = vMemGet(); return vMem.pageGranularity; } -void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) { +void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) noexcept { return allocProcessMemory(static_cast(0), length, allocated, flags); } -void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) { +void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) noexcept { if (length == 0) return nullptr; @@ -141,11 +141,11 @@ void* VMemUtil::allocProcessMemory(HANDLE hProcess, size_t length, size_t* alloc return mBase; } -Error VMemUtil::release(void* addr, size_t length) { +Error VMemUtil::release(void* addr, size_t length) noexcept { return releaseProcessMemory(static_cast(0), addr, length); } -Error VMemUtil::releaseProcessMemory(HANDLE hProcess, void* addr, size_t /* length */) { +Error VMemUtil::releaseProcessMemory(HANDLE hProcess, void* addr, size_t /* length */) noexcept { hProcess = vMemGet().getSafeProcessHandle(hProcess); if (!::VirtualFreeEx(hProcess, addr, 0, MEM_RELEASE)) return kErrorInvalidState; @@ -171,7 +171,7 @@ struct VMemLocal { }; static VMemLocal vMemLocal; -static const VMemLocal& vMemGet() { +static const VMemLocal& vMemGet() noexcept { VMemLocal& vMem = vMemLocal; if (!vMem.pageSize) { @@ -183,17 +183,17 @@ static const VMemLocal& vMemGet() { return vMem; }; -size_t VMemUtil::getPageSize() 
{ +size_t VMemUtil::getPageSize() noexcept { const VMemLocal& vMem = vMemGet(); return vMem.pageSize; } -size_t VMemUtil::getPageGranularity() { +size_t VMemUtil::getPageGranularity() noexcept { const VMemLocal& vMem = vMemGet(); return vMem.pageGranularity; } -void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) { +void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) noexcept { const VMemLocal& vMem = vMemGet(); size_t msize = Utils::alignTo(length, vMem.pageSize); int protection = PROT_READ; @@ -210,7 +210,7 @@ void* VMemUtil::alloc(size_t length, size_t* allocated, uint32_t flags) { return mbase; } -Error VMemUtil::release(void* addr, size_t length) { +Error VMemUtil::release(void* addr, size_t length) noexcept { if (::munmap(addr, length) != 0) return kErrorInvalidState; @@ -233,7 +233,7 @@ enum { //! \internal //! //! Set `len` bits in `buf` starting at `index` bit index. -static void _SetBits(size_t* buf, size_t index, size_t len) { +static void _SetBits(size_t* buf, size_t index, size_t len) noexcept { if (len == 0) return; @@ -290,14 +290,14 @@ struct VMemMgr::RbNode { //! \internal //! //! Get whether the node is red (nullptr or node with red flag). -static ASMJIT_INLINE bool rbIsRed(RbNode* node) { +static ASMJIT_INLINE bool rbIsRed(RbNode* node) noexcept { return node != nullptr && node->red; } //! \internal //! //! Check whether the RB tree is valid. -static int rbAssert(RbNode* root) { +static int rbAssert(RbNode* root) noexcept { if (root == nullptr) return 1; @@ -327,7 +327,7 @@ static int rbAssert(RbNode* root) { //! \internal //! //! Single rotation. -static ASMJIT_INLINE RbNode* rbRotateSingle(RbNode* root, int dir) { +static ASMJIT_INLINE RbNode* rbRotateSingle(RbNode* root, int dir) noexcept { RbNode* save = root->node[!dir]; root->node[!dir] = save->node[dir]; @@ -342,7 +342,7 @@ static ASMJIT_INLINE RbNode* rbRotateSingle(RbNode* root, int dir) { //! \internal //! //! Double rotation. 
-static ASMJIT_INLINE RbNode* rbRotateDouble(RbNode* root, int dir) { +static ASMJIT_INLINE RbNode* rbRotateDouble(RbNode* root, int dir) noexcept { root->node[!dir] = rbRotateSingle(root->node[!dir], !dir); return rbRotateSingle(root, dir); } @@ -357,11 +357,11 @@ struct VMemMgr::MemNode : public RbNode { // -------------------------------------------------------------------------- // Get available space. - ASMJIT_INLINE size_t getAvailable() const { + ASMJIT_INLINE size_t getAvailable() const noexcept { return size - used; } - ASMJIT_INLINE void fillData(MemNode* other) { + ASMJIT_INLINE void fillData(MemNode* other) noexcept { mem = other->mem; size = other->size; @@ -404,7 +404,7 @@ struct VMemMgr::PermanentNode { // -------------------------------------------------------------------------- //! Get available space. - ASMJIT_INLINE size_t getAvailable() const { + ASMJIT_INLINE size_t getAvailable() const noexcept { return size - used; } @@ -425,7 +425,7 @@ struct VMemMgr::PermanentNode { //! \internal //! //! Helper to avoid `#ifdef`s in the code. -ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSize) { +ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSize) noexcept { uint32_t flags = kVMemFlagWritable | kVMemFlagExecutable; #if !ASMJIT_OS_WINDOWS return static_cast(VMemUtil::alloc(size, vSize, flags)); @@ -437,7 +437,7 @@ ASMJIT_INLINE uint8_t* vMemMgrAllocVMem(VMemMgr* self, size_t size, size_t* vSiz //! \internal //! //! Helper to avoid `#ifdef`s in the code. -ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) { +ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) noexcept { #if !ASMJIT_OS_WINDOWS return VMemUtil::release(p, vSize); #else @@ -448,7 +448,7 @@ ASMJIT_INLINE Error vMemMgrReleaseVMem(VMemMgr* self, void* p, size_t vSize) { //! \internal //! //! Check whether the Red-Black tree is valid. 
-static bool vMemMgrCheckTree(VMemMgr* self) { +static bool vMemMgrCheckTree(VMemMgr* self) noexcept { return rbAssert(self->_root) > 0; } @@ -457,7 +457,7 @@ static bool vMemMgrCheckTree(VMemMgr* self) { //! Alloc virtual memory including a heap memory needed for `MemNode` data. //! //! Returns set-up `MemNode*` or nullptr if allocation failed. -static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) { +static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) noexcept { size_t vSize; uint8_t* vmem = vMemMgrAllocVMem(self, size, &vSize); @@ -502,7 +502,7 @@ static MemNode* vMemMgrCreateNode(VMemMgr* self, size_t size, size_t density) { return node; } -static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) { +static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) noexcept { if (self->_root == nullptr) { // Empty tree case. self->_root = node; @@ -586,7 +586,7 @@ static void vMemMgrInsertNode(VMemMgr* self, MemNode* node) { //! //! Returns node that should be freed, but it doesn't have to be necessarily //! the `node` passed. -static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) { +static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) noexcept { // False tree root. 
RbNode head = { { nullptr, nullptr }, 0, 0 }; @@ -686,7 +686,7 @@ static MemNode* vMemMgrRemoveNode(VMemMgr* self, MemNode* node) { return static_cast(q); } -static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) { +static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) noexcept { MemNode* node = self->_root; while (node != nullptr) { uint8_t* nodeMem = node->mem; @@ -710,7 +710,7 @@ static MemNode* vMemMgrFindNodeByPtr(VMemMgr* self, uint8_t* mem) { return node; } -static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) { +static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) noexcept { static const size_t permanentAlignment = 32; static const size_t permanentNodeSize = 32768; @@ -760,7 +760,7 @@ static void* vMemMgrAllocPermanent(VMemMgr* self, size_t vSize) { return static_cast(result); } -static void* vMemMgrAllocFreeable(VMemMgr* self, size_t vSize) { +static void* vMemMgrAllocFreeable(VMemMgr* self, size_t vSize) noexcept { // Current index. size_t i; @@ -892,7 +892,7 @@ _Found: //! Reset the whole `VMemMgr` instance, freeing all heap memory allocated an //! virtual memory allocated unless `keepVirtualMemory` is true (and this is //! only used when writing data to a remote process). 
-static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) { +static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) noexcept { MemNode* node = self->_first; while (node != nullptr) { @@ -921,13 +921,12 @@ static void vMemMgrReset(VMemMgr* self, bool keepVirtualMemory) { // ============================================================================ #if !ASMJIT_OS_WINDOWS -VMemMgr::VMemMgr() +VMemMgr::VMemMgr() noexcept #else -VMemMgr::VMemMgr(HANDLE hProcess) : +VMemMgr::VMemMgr(HANDLE hProcess) noexcept : _hProcess(vMemGet().getSafeProcessHandle(hProcess)) #endif // ASMJIT_OS_WINDOWS { - _blockSize = VMemUtil::getPageGranularity(); _blockDensity = 64; @@ -943,7 +942,7 @@ VMemMgr::VMemMgr(HANDLE hProcess) : _keepVirtualMemory = false; } -VMemMgr::~VMemMgr() { +VMemMgr::~VMemMgr() noexcept { // Freeable memory cleanup - Also frees the virtual memory if configured to. vMemMgrReset(this, _keepVirtualMemory); @@ -960,7 +959,7 @@ VMemMgr::~VMemMgr() { // [asmjit::VMemMgr - Reset] // ============================================================================ -void VMemMgr::reset() { +void VMemMgr::reset() noexcept { vMemMgrReset(this, false); } @@ -968,14 +967,14 @@ void VMemMgr::reset() { // [asmjit::VMemMgr - Alloc / Release] // ============================================================================ -void* VMemMgr::alloc(size_t size, uint32_t type) { +void* VMemMgr::alloc(size_t size, uint32_t type) noexcept { if (type == kVMemAllocPermanent) return vMemMgrAllocPermanent(this, size); else return vMemMgrAllocFreeable(this, size); } -Error VMemMgr::release(void* p) { +Error VMemMgr::release(void* p) noexcept { if (p == nullptr) return kErrorOk; @@ -1062,7 +1061,7 @@ Error VMemMgr::release(void* p) { return kErrorOk; } -Error VMemMgr::shrink(void* p, size_t used) { +Error VMemMgr::shrink(void* p, size_t used) noexcept { if (p == nullptr) return kErrorOk; @@ -1149,7 +1148,7 @@ _EnterFreeLoop: // 
============================================================================ #if defined(ASMJIT_TEST) -static void VMemTest_fill(void* a, void* b, int i) { +static void VMemTest_fill(void* a, void* b, int i) noexcept { int pattern = rand() % 256; *(int *)a = i; *(int *)b = i; @@ -1157,7 +1156,7 @@ static void VMemTest_fill(void* a, void* b, int i) { ::memset((char*)b + sizeof(int), pattern, i - sizeof(int)); } -static void VMemTest_verify(void* a, void* b) { +static void VMemTest_verify(void* a, void* b) noexcept { int ai = *(int*)a; int bi = *(int*)b; @@ -1168,12 +1167,12 @@ static void VMemTest_verify(void* a, void* b) { "Pattern (%p) doesn't match", a); } -static void VMemTest_stats(VMemMgr& memmgr) { +static void VMemTest_stats(VMemMgr& memmgr) noexcept { INFO("Used : %u", static_cast(memmgr.getUsedBytes())); INFO("Allocated: %u", static_cast(memmgr.getAllocatedBytes())); } -static void VMemTest_shuffle(void** a, void** b, size_t count) { +static void VMemTest_shuffle(void** a, void** b, size_t count) noexcept { for (size_t i = 0; i < count; ++i) { size_t si = (size_t)rand() % count; diff --git a/src/asmjit/base/vmem.h b/src/asmjit/base/vmem.h index 8b94473..e39388f 100644 --- a/src/asmjit/base/vmem.h +++ b/src/asmjit/base/vmem.h @@ -60,26 +60,26 @@ ASMJIT_ENUM(VMemFlags) { //! overview on how to use a platform specific APIs. struct VMemUtil { //! Get a size/alignment of a single virtual memory page. - static ASMJIT_API size_t getPageSize(); + static ASMJIT_API size_t getPageSize() noexcept; //! Get a recommended granularity for a single `alloc` call. - static ASMJIT_API size_t getPageGranularity(); + static ASMJIT_API size_t getPageGranularity() noexcept; //! Allocate virtual memory. //! //! Pages are readable/writeable, but they are not guaranteed to be //! executable unless 'canExecute' is true. Returns the address of //! allocated memory, or `nullptr` on failure. 
- static ASMJIT_API void* alloc(size_t length, size_t* allocated, uint32_t flags); + static ASMJIT_API void* alloc(size_t length, size_t* allocated, uint32_t flags) noexcept; //! Free memory allocated by `alloc()`. - static ASMJIT_API Error release(void* addr, size_t length); + static ASMJIT_API Error release(void* addr, size_t length) noexcept; #if ASMJIT_OS_WINDOWS //! Allocate virtual memory of `hProcess` (Windows only). - static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags); + static ASMJIT_API void* allocProcessMemory(HANDLE hProcess, size_t length, size_t* allocated, uint32_t flags) noexcept; //! Release virtual memory of `hProcess` (Windows only). - static ASMJIT_API Error releaseProcessMemory(HANDLE hProcess, void* addr, size_t length); + static ASMJIT_API Error releaseProcessMemory(HANDLE hProcess, void* addr, size_t length) noexcept; #endif // ASMJIT_OS_WINDOWS }; @@ -96,25 +96,25 @@ struct VMemMgr { #if !ASMJIT_OS_WINDOWS //! Create a `VMemMgr` instance. - ASMJIT_API VMemMgr(); + ASMJIT_API VMemMgr() noexcept; #else //! Create a `VMemMgr` instance. //! //! \note When running on Windows it's possible to specify a `hProcess` to //! be used for memory allocation. This allows to allocate memory of remote //! process. - ASMJIT_API VMemMgr(HANDLE hProcess = static_cast(0)); + ASMJIT_API VMemMgr(HANDLE hProcess = static_cast(0)) noexcept; #endif // ASMJIT_OS_WINDOWS //! Destroy the `VMemMgr` instance and free all blocks. - ASMJIT_API ~VMemMgr(); + ASMJIT_API ~VMemMgr() noexcept; // -------------------------------------------------------------------------- // [Reset] // -------------------------------------------------------------------------- //! Free all allocated memory. - ASMJIT_API void reset(); + ASMJIT_API void reset() noexcept; // -------------------------------------------------------------------------- // [Accessors] @@ -122,25 +122,25 @@ struct VMemMgr { #if ASMJIT_OS_WINDOWS //! 
Get the handle of the process memory manager is bound to. - ASMJIT_INLINE HANDLE getProcessHandle() const { + ASMJIT_INLINE HANDLE getProcessHandle() const noexcept { return _hProcess; } #endif // ASMJIT_OS_WINDOWS //! Get how many bytes are currently allocated. - ASMJIT_INLINE size_t getAllocatedBytes() const { + ASMJIT_INLINE size_t getAllocatedBytes() const noexcept { return _allocatedBytes; } //! Get how many bytes are currently used. - ASMJIT_INLINE size_t getUsedBytes() const { + ASMJIT_INLINE size_t getUsedBytes() const noexcept { return _usedBytes; } //! Get whether to keep allocated memory after the `VMemMgr` is destroyed. //! //! \sa \ref setKeepVirtualMemory. - ASMJIT_INLINE bool getKeepVirtualMemory() const { + ASMJIT_INLINE bool getKeepVirtualMemory() const noexcept { return _keepVirtualMemory; } @@ -156,7 +156,7 @@ struct VMemMgr { //! \note Memory allocated with kVMemAllocPermanent is always kept. //! //! \sa \ref getKeepVirtualMemory. - ASMJIT_INLINE void setKeepVirtualMemory(bool keepVirtualMemory) { + ASMJIT_INLINE void setKeepVirtualMemory(bool keepVirtualMemory) noexcept { _keepVirtualMemory = keepVirtualMemory; } @@ -169,13 +169,13 @@ struct VMemMgr { //! Note that if you are implementing your own virtual memory manager then you //! can quitly ignore type of allocation. This is mainly for AsmJit to memory //! manager that allocated memory will be never freed. - ASMJIT_API void* alloc(size_t size, uint32_t type = kVMemAllocFreeable); + ASMJIT_API void* alloc(size_t size, uint32_t type = kVMemAllocFreeable) noexcept; //! Free previously allocated memory at a given `address`. - ASMJIT_API Error release(void* p); + ASMJIT_API Error release(void* p) noexcept; //! Free extra memory allocated with `p`. 
- ASMJIT_API Error shrink(void* p, size_t used); + ASMJIT_API Error shrink(void* p, size_t used) noexcept; // -------------------------------------------------------------------------- // [Members] diff --git a/src/asmjit/base/zone.cpp b/src/asmjit/base/zone.cpp index 4684d27..1c7ceb9 100644 --- a/src/asmjit/base/zone.cpp +++ b/src/asmjit/base/zone.cpp @@ -28,12 +28,12 @@ static const Zone::Block Zone_zeroBlock = { // [asmjit::Zone - Construction / Destruction] // ============================================================================ -Zone::Zone(size_t blockSize) { +Zone::Zone(size_t blockSize) noexcept { _block = const_cast(&Zone_zeroBlock); _blockSize = blockSize; } -Zone::~Zone() { +Zone::~Zone() noexcept { reset(true); } @@ -41,7 +41,7 @@ Zone::~Zone() { // [asmjit::Zone - Reset] // ============================================================================ -void Zone::reset(bool releaseMemory) { +void Zone::reset(bool releaseMemory) noexcept { Block* cur = _block; // Can't be altered. 
@@ -80,7 +80,7 @@ void Zone::reset(bool releaseMemory) { // [asmjit::Zone - Alloc] // ============================================================================ -void* Zone::_alloc(size_t size) { +void* Zone::_alloc(size_t size) noexcept { Block* curBlock = _block; size_t blockSize = Utils::iMax(_blockSize, size); @@ -129,14 +129,14 @@ void* Zone::_alloc(size_t size) { return static_cast(newBlock->data); } -void* Zone::allocZeroed(size_t size) { +void* Zone::allocZeroed(size_t size) noexcept { void* p = alloc(size); if (p != nullptr) ::memset(p, 0, size); return p; } -void* Zone::dup(const void* data, size_t size) { +void* Zone::dup(const void* data, size_t size) noexcept { if (data == nullptr) return nullptr; @@ -151,7 +151,7 @@ void* Zone::dup(const void* data, size_t size) { return m; } -char* Zone::sdup(const char* str) { +char* Zone::sdup(const char* str) noexcept { if (str == nullptr) return nullptr; @@ -172,7 +172,7 @@ char* Zone::sdup(const char* str) { return m; } -char* Zone::sformat(const char* fmt, ...) { +char* Zone::sformat(const char* fmt, ...) noexcept { if (fmt == nullptr) return nullptr; diff --git a/src/asmjit/base/zone.h b/src/asmjit/base/zone.h index 0ee79ac..f563bc4 100644 --- a/src/asmjit/base/zone.h +++ b/src/asmjit/base/zone.h @@ -48,12 +48,12 @@ struct Zone { // ------------------------------------------------------------------------ //! Get the size of the block. - ASMJIT_INLINE size_t getBlockSize() const { + ASMJIT_INLINE size_t getBlockSize() const noexcept { return (size_t)(end - data); } //! Get count of remaining bytes in the block. - ASMJIT_INLINE size_t getRemainingSize() const { + ASMJIT_INLINE size_t getRemainingSize() const noexcept { return (size_t)(end - pos); } @@ -98,13 +98,13 @@ struct Zone { //! It's not required, but it's good practice to set `blockSize` to a //! reasonable value that depends on the usage of `Zone`. Greater block sizes //! are generally safer and performs better than unreasonably low values. 
- ASMJIT_API Zone(size_t blockSize); + ASMJIT_API Zone(size_t blockSize) noexcept; //! Destroy the `Zone` instance. //! //! This will destroy the `Zone` instance and release all blocks of memory //! allocated by it. It performs implicit `reset(true)`. - ASMJIT_API ~Zone(); + ASMJIT_API ~Zone() noexcept; // -------------------------------------------------------------------------- // [Reset] @@ -113,14 +113,14 @@ struct Zone { //! Reset the `Zone` invalidating all blocks allocated. //! //! If `releaseMemory` is true all buffers will be released to the system. - ASMJIT_API void reset(bool releaseMemory = false); + ASMJIT_API void reset(bool releaseMemory = false) noexcept; // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get the default block size. - ASMJIT_INLINE size_t getBlockSize() const { + ASMJIT_INLINE size_t getBlockSize() const noexcept { return _blockSize; } @@ -160,7 +160,7 @@ struct Zone { //! // Reset of destroy `Zone`. //! zone.reset(); //! ~~~ - ASMJIT_INLINE void* alloc(size_t size) { + ASMJIT_INLINE void* alloc(size_t size) noexcept { Block* cur = _block; uint8_t* ptr = cur->pos; @@ -178,31 +178,31 @@ struct Zone { //! Allocate `size` bytes of zeroed memory. //! //! See \ref alloc() for more details. - ASMJIT_API void* allocZeroed(size_t size); + ASMJIT_API void* allocZeroed(size_t size) noexcept; //! Like `alloc()`, but the return pointer is casted to `T*`. template - ASMJIT_INLINE T* allocT(size_t size = sizeof(T)) { + ASMJIT_INLINE T* allocT(size_t size = sizeof(T)) noexcept { return static_cast(alloc(size)); } //! Like `allocZeroed()`, but the return pointer is casted to `T*`. template - ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T)) { + ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T)) noexcept { return static_cast(allocZeroed(size)); } //! 
\internal - ASMJIT_API void* _alloc(size_t size); + ASMJIT_API void* _alloc(size_t size) noexcept; //! Helper to duplicate data. - ASMJIT_API void* dup(const void* data, size_t size); + ASMJIT_API void* dup(const void* data, size_t size) noexcept; //! Helper to duplicate string. - ASMJIT_API char* sdup(const char* str); + ASMJIT_API char* sdup(const char* str) noexcept; //! Helper to duplicate formatted string, maximum length is 256 bytes. - ASMJIT_API char* sformat(const char* str, ...); + ASMJIT_API char* sformat(const char* str, ...) noexcept; // -------------------------------------------------------------------------- // [Members] diff --git a/src/asmjit/build.h b/src/asmjit/build.h index 2a95f91..1e991c7 100644 --- a/src/asmjit/build.h +++ b/src/asmjit/build.h @@ -70,13 +70,13 @@ // AsmJit features are enabled by default. // #define ASMJIT_DISABLE_COMPILER // Disable Compiler (completely). // #define ASMJIT_DISABLE_LOGGER // Disable Logger (completely). -// #define ASMJIT_DISABLE_NAMES // Disable everything that uses strings -// // (instruction names, error names, ...). +// #define ASMJIT_DISABLE_TEXT // Disable everything that contains text +// // representation (instructions, errors, ...). // Prevent compile-time errors caused by misconfiguration. -#if defined(ASMJIT_DISABLE_NAMES) && !defined(ASMJIT_DISABLE_LOGGER) -# error "[asmjit] ASMJIT_DISABLE_NAMES requires ASMJIT_DISABLE_LOGGER to be defined." -#endif // ASMJIT_DISABLE_NAMES && !ASMJIT_DISABLE_LOGGER +#if defined(ASMJIT_DISABLE_TEXT) && !defined(ASMJIT_DISABLE_LOGGER) +# error "[asmjit] ASMJIT_DISABLE_TEXT requires ASMJIT_DISABLE_LOGGER to be defined." +#endif // ASMJIT_DISABLE_TEXT && !ASMJIT_DISABLE_LOGGER // Detect ASMJIT_DEBUG and ASMJIT_RELEASE if not forced from outside. 
#if !defined(ASMJIT_DEBUG) && !defined(ASMJIT_RELEASE) && !defined(NDEBUG) @@ -226,7 +226,7 @@ // ============================================================================ // [@ARCH{@] -// \def ASMJIT_ARCH_ARM +// \def ASMJIT_ARCH_ARM32 // True if the target architecture is a 32-bit ARM. // // \def ASMJIT_ARCH_ARM64 @@ -268,18 +268,18 @@ # define ASMJIT_ARCH_ARM64 0 #endif -#if (defined(_M_ARM ) || defined(__arm__ ) || defined(__arm) || \ - defined(_M_ARMT ) || defined(__thumb__)) -# define ASMJIT_ARCH_ARM (!ASMJIT_ARCH_ARM64) +#if (defined(_M_ARM ) || defined(__arm ) || defined(__thumb__ ) || \ + defined(_M_ARMT ) || defined(__arm__ ) || defined(__thumb2__)) +# define ASMJIT_ARCH_ARM32 (!ASMJIT_ARCH_ARM64) #else -# define ASMJIT_ARCH_ARM 0 +# define ASMJIT_ARCH_ARM32 0 #endif -#define ASMJIT_ARCH_LE ( \ - ASMJIT_ARCH_X86 || \ - ASMJIT_ARCH_X64 || \ - ASMJIT_ARCH_ARM || \ - ASMJIT_ARCH_ARM64) +#define ASMJIT_ARCH_LE ( \ + ASMJIT_ARCH_X86 || \ + ASMJIT_ARCH_X64 || \ + ASMJIT_ARCH_ARM32 || \ + ASMJIT_ARCH_ARM64 ) #define ASMJIT_ARCH_BE (!(ASMJIT_ARCH_LE)) #define ASMJIT_ARCH_64BIT (ASMJIT_ARCH_X64 || ASMJIT_ARCH_ARM64) // [@ARCH}@] @@ -690,7 +690,7 @@ // [@CC_NOEXCEPT{@] // \def ASMJIT_NOEXCEPT // The decorated function never throws an exception (noexcept). -#if ASMJIT_HAS_NOEXCEPT +#if ASMJIT_CC_HAS_NOEXCEPT # define ASMJIT_NOEXCEPT noexcept #else # define ASMJIT_NOEXCEPT @@ -860,10 +860,10 @@ typedef unsigned __int64 uint64_t; # endif #endif // !ASMJIT_ALLOC && !ASMJIT_REALLOC && !ASMJIT_FREE -#define ASMJIT_NO_COPY(Self) \ +#define ASMJIT_NO_COPY(...) 
\ private: \ - ASMJIT_INLINE Self(const Self& other); \ - ASMJIT_INLINE Self& operator=(const Self& other); \ + ASMJIT_INLINE __VA_ARGS__(const __VA_ARGS__& other); \ + ASMJIT_INLINE __VA_ARGS__& operator=(const __VA_ARGS__& other); \ public: // ============================================================================ diff --git a/src/asmjit/host.h b/src/asmjit/host.h index 9d051d2..2e42db4 100644 --- a/src/asmjit/host.h +++ b/src/asmjit/host.h @@ -35,9 +35,6 @@ typedef X86YmmReg YmmReg; typedef X86SegReg SegReg; typedef X86Mem Mem; -// Define host utilities. -typedef X86CpuInfo HostCpuInfo; - // Define host compiler and related. #if !defined(ASMJIT_DISABLE_COMPILER) typedef X86Compiler HostCompiler; diff --git a/src/asmjit/x86.h b/src/asmjit/x86.h index b73d428..ae53bb9 100644 --- a/src/asmjit/x86.h +++ b/src/asmjit/x86.h @@ -14,7 +14,6 @@ #include "./x86/x86assembler.h" #include "./x86/x86compiler.h" #include "./x86/x86compilerfunc.h" -#include "./x86/x86cpuinfo.h" #include "./x86/x86inst.h" #include "./x86/x86operand.h" diff --git a/src/asmjit/x86/x86assembler.cpp b/src/asmjit/x86/x86assembler.cpp index d4389d2..1f20f6f 100644 --- a/src/asmjit/x86/x86assembler.cpp +++ b/src/asmjit/x86/x86assembler.cpp @@ -13,12 +13,12 @@ // [Dependencies - AsmJit] #include "../base/containers.h" +#include "../base/cpuinfo.h" #include "../base/logger.h" #include "../base/runtime.h" #include "../base/utils.h" #include "../base/vmem.h" #include "../x86/x86assembler.h" -#include "../x86/x86cpuinfo.h" // [Api-Begin] #include "../apibegin.h" @@ -99,17 +99,10 @@ struct X86OpCodeMM { //! \internal //! -//! Mandatory prefixes encoded in 'asmjit' opcode [66, F3, F2] and asmjit +//! Mandatory prefixes encoded in 'asmjit' opcode [66, F3, F2] and AsmJit //! extensions static const uint8_t x86OpCodePP[8] = { - 0x00, - 0x66, - 0xF3, - 0xF2, - 0x00, - 0x00, - 0x00, - 0x9B + 0x00, 0x66, 0xF3, 0xF2, 0x00, 0x00, 0x00, 0x9B }; //! 
\internal @@ -206,70 +199,65 @@ static ASMJIT_INLINE bool x86IsYmm(const X86Reg* reg) { return reg->isYmm(); } // [Macros] // ============================================================================ -#define ENC_OPS(_Op0_, _Op1_, _Op2_) \ - ((kOperandType##_Op0_) + ((kOperandType##_Op1_) << 3) + ((kOperandType##_Op2_) << 6)) +#define ENC_OPS(op0, op1, op2) \ + ((Operand::kType##op0) + ((Operand::kType##op1) << 3) + ((Operand::kType##op2) << 6)) -#define ADD_66H_P(_Exp_) \ +#define ADD_66H_P(exp) \ do { \ - opCode |= (static_cast(_Exp_) << kX86InstOpCode_PP_Shift); \ + opCode |= (static_cast(exp) << kX86InstOpCode_PP_Shift); \ } while (0) -#define ADD_66H_P_BY_SIZE(_Size_) \ +#define ADD_66H_P_BY_SIZE(sz) \ do { \ - opCode |= (static_cast(_Size_) & 0x02) << (kX86InstOpCode_PP_Shift - 1); \ + opCode |= (static_cast(sz) & 0x02) << (kX86InstOpCode_PP_Shift - 1); \ } while (0) -#define ADD_REX_W(_Exp_) \ +#define ADD_REX_W(exp) \ do { \ if (Arch == kArchX64) \ - opCode |= static_cast(_Exp_) << kX86InstOpCode_W_Shift; \ + opCode |= static_cast(exp) << kX86InstOpCode_W_Shift; \ } while (0) -#define ADD_REX_W_BY_SIZE(_Size_) \ +#define ADD_REX_W_BY_SIZE(sz) \ do { \ - if (Arch == kArchX64 && (_Size_) == 8) \ + if (Arch == kArchX64 && (sz) == 8) \ opCode |= kX86InstOpCode_W; \ } while (0) -#define ADD_VEX_W(_Exp_) \ +#define ADD_VEX_W(exp) \ do { \ - opCode |= static_cast(_Exp_) << kX86InstOpCode_W_Shift; \ + opCode |= static_cast(exp) << kX86InstOpCode_W_Shift; \ } while (0) -#define ADD_VEX_L(_Exp_) \ +#define ADD_VEX_L(exp) \ do { \ - opCode |= static_cast(_Exp_) << kX86InstOpCode_L_Shift; \ + opCode |= static_cast(exp) << kX86InstOpCode_L_Shift; \ } while (0) #define EMIT_BYTE(_Val_) \ do { \ - cursor[0] = static_cast(_Val_); \ + cursor[0] = static_cast((_Val_) & 0xFF); \ cursor += 1; \ } while (0) #define EMIT_WORD(_Val_) \ do { \ - reinterpret_cast(cursor)[0] = static_cast(_Val_); \ + Utils::writeU16uLE(cursor, static_cast(_Val_)); \ cursor += 2; \ } while (0) 
#define EMIT_DWORD(_Val_) \ do { \ - reinterpret_cast(cursor)[0] = static_cast(_Val_); \ + Utils::writeU32uLE(cursor, static_cast(_Val_)); \ cursor += 4; \ } while (0) #define EMIT_QWORD(_Val_) \ do { \ - reinterpret_cast(cursor)[0] = static_cast(_Val_); \ + Utils::writeU64uLE(cursor, static_cast(_Val_)); \ cursor += 8; \ } while (0) -#define EMIT_OP(_Val_) \ - do { \ - EMIT_BYTE((_Val_) & 0xFF); \ - } while (0) - #define EMIT_PP(_Val_) \ do { \ uint32_t ppIndex = ((_Val_) >> kX86InstOpCode_PP_Shift) & (kX86InstOpCode_PP_Mask >> kX86InstOpCode_PP_Shift); \ @@ -309,6 +297,7 @@ X86Assembler::X86Assembler(Runtime* runtime, uint32_t arch) zbp(NoInit), zsi(NoInit), zdi(NoInit) { + ASMJIT_ASSERT(arch == kArchX86 || arch == kArchX64); _setArch(arch); } @@ -367,13 +356,12 @@ Error X86Assembler::embedLabel(const Label& op) { ASMJIT_PROPAGATE_ERROR(_grow(regSize)); uint8_t* cursor = getCursor(); - LabelData* label = getLabelData(op.getId()); RelocData rd; #if !defined(ASMJIT_DISABLE_LOGGER) if (_logger) - _logger->logFormat(kLoggerStyleData, regSize == 4 ? ".dd L%u\n" : ".dq L%u\n", op.getId()); + _logger->logFormat(Logger::kStyleData, regSize == 4 ? ".dd L%u\n" : ".dq L%u\n", op.getId()); #endif // !ASMJIT_DISABLE_LOGGER rd.type = kRelocRelToAbs; @@ -392,19 +380,18 @@ Error X86Assembler::embedLabel(const Label& op) { link->prev = (LabelLink*)label->links; link->offset = getOffset(); link->displacement = 0; - link->relocId = _relocList.getLength(); + link->relocId = _relocations.getLength(); label->links = link; } - if (_relocList.append(rd) != kErrorOk) + if (_relocations.append(rd) != kErrorOk) return setLastError(kErrorNoHeapMemory); // Emit dummy intptr_t (4 or 8 bytes; depends on the address size). 
- if (regSize == 4) + EMIT_DWORD(0); + if (regSize == 8) EMIT_DWORD(0); - else - EMIT_QWORD(0); setCursor(cursor); return kErrorOk; @@ -414,10 +401,10 @@ Error X86Assembler::embedLabel(const Label& op) { // [asmjit::X86Assembler - Align] // ============================================================================ -Error X86Assembler::align(uint32_t alignMode, uint32_t offset) { +Error X86Assembler::align(uint32_t alignMode, uint32_t offset) noexcept { #if !defined(ASMJIT_DISABLE_LOGGER) if (_logger) - _logger->logFormat(kLoggerStyleDirective, + _logger->logFormat(Logger::kStyleDirective, "%s.align %u\n", _logger->getIndentation(), static_cast(offset)); #endif // !ASMJIT_DISABLE_LOGGER @@ -436,7 +423,7 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) { switch (alignMode) { case kAlignCode: { - if (hasFeature(kAssemblerFeatureOptimizedAlign)) { + if (hasAsmOption(kOptionOptimizedAlign)) { // Intel 64 and IA-32 Architectures Software Developer's Manual - Volume 2B (NOP). enum { kMaxNopSize = 9 }; @@ -454,7 +441,7 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) { do { uint32_t n = Utils::iMin(i, kMaxNopSize); - const uint8_t* p = nopData[(n - 1)]; + const uint8_t* p = nopData[n - 1]; i -= n; do { @@ -491,7 +478,7 @@ Error X86Assembler::align(uint32_t alignMode, uint32_t offset) { // [asmjit::X86Assembler - Reloc] // ============================================================================ -size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const { +size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const noexcept { uint32_t arch = getArch(); uint8_t* dst = static_cast(_dst); @@ -510,8 +497,8 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const { uint8_t* tramp = dst + minCodeSize; // Relocate all recorded locations. 
- size_t relocCount = _relocList.getLength(); - const RelocData* rdList = _relocList.getData(); + size_t relocCount = _relocations.getLength(); + const RelocData* rdList = _relocations.getData(); for (size_t i = 0; i < relocCount; i++) { const RelocData& rd = rdList[i]; @@ -551,12 +538,12 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const { } switch (rd.size) { - case 8: - *reinterpret_cast(dst + offset) = static_cast(ptr); + case 4: + Utils::writeU32u(dst + offset, static_cast(static_cast(ptr))); break; - case 4: - *reinterpret_cast(dst + offset) = static_cast(static_cast(ptr)); + case 8: + Utils::writeI64u(dst + offset, static_cast(ptr)); break; default: @@ -582,14 +569,14 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const { dst[offset - 1] = byte1; // Absolute address. - ((uint64_t*)tramp)[0] = static_cast(rd.data); + Utils::writeU64u(tramp, static_cast(rd.data)); // Advance trampoline pointer. tramp += 8; #if !defined(ASMJIT_DISABLE_LOGGER) if (logger) - logger->logFormat(kLoggerStyleComment, "; Trampoline %llX\n", rd.data); + logger->logFormat(Logger::kStyleComment, "; Trampoline %llX\n", rd.data); #endif // !ASMJIT_DISABLE_LOGGER } } @@ -606,7 +593,7 @@ size_t X86Assembler::_relocCode(void* _dst, Ptr baseAddress) const { #if !defined(ASMJIT_DISABLE_LOGGER) // Logging helpers. 
-static const char* AssemblerX86_getAddressSizeString(uint32_t size) { +static const char* AssemblerX86_getAddressSizeString(uint32_t size) noexcept { switch (size) { case 1 : return "byte ptr "; case 2 : return "word ptr "; @@ -820,7 +807,7 @@ static void X86Assembler_dumpOperand(StringBuilder& sb, uint32_t arch, const Ope } sb._appendChar(prefix); - if ((loggerOptions & (1 << kLoggerOptionHexDisplacement)) != 0 && dispOffset > 9) { + if ((loggerOptions & Logger::kOptionHexDisplacement) != 0 && dispOffset > 9) { sb._appendString("0x", 2); base = 16; } @@ -833,7 +820,7 @@ static void X86Assembler_dumpOperand(StringBuilder& sb, uint32_t arch, const Ope const Imm* i = static_cast(op); int64_t val = i->getInt64(); - if ((loggerOptions & (1 << kLoggerOptionHexImmediate)) && static_cast(val) > 9) + if ((loggerOptions & Logger::kOptionHexImmediate) != 0 && static_cast(val) > 9) sb.appendUInt(static_cast(val), 16); else sb.appendInt(val, 10); @@ -904,13 +891,13 @@ static bool X86Assembler_dumpInstruction(StringBuilder& sb, #define HI_REG(_Index_) ((_kX86RegTypePatchedGpbHi << 8) | _Index_) //! \internal static const Operand::VRegOp x86PatchedHiRegs[4] = { - // --------------+---+--------------+--------------+------------+ - // Operand | S | Reg. Code | OperandId | Unused | - // --------------+---+--------------+--------------+------------+ - { kOperandTypeReg, 1 , { HI_REG(4) }, kInvalidValue, {{ 0, 0 }} }, - { kOperandTypeReg, 1 , { HI_REG(5) }, kInvalidValue, {{ 0, 0 }} }, - { kOperandTypeReg, 1 , { HI_REG(6) }, kInvalidValue, {{ 0, 0 }} }, - { kOperandTypeReg, 1 , { HI_REG(7) }, kInvalidValue, {{ 0, 0 }} } + // ----------------+---+--------------+--------------+------------+ + // Operand | S | Reg. 
Code | OperandId | Unused | + // ----------------+---+--------------+--------------+------------+ + { Operand::kTypeReg, 1 , { HI_REG(4) }, kInvalidValue, {{ 0, 0 }} }, + { Operand::kTypeReg, 1 , { HI_REG(5) }, kInvalidValue, {{ 0, 0 }} }, + { Operand::kTypeReg, 1 , { HI_REG(6) }, kInvalidValue, {{ 0, 0 }} }, + { Operand::kTypeReg, 1 , { HI_REG(7) }, kInvalidValue, {{ 0, 0 }} } }; #undef HI_REG @@ -997,8 +984,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co } else { // `W` field. - ASMJIT_ASSERT(static_cast(kX86InstOptionRex) == - static_cast(kX86ByteRex)); + ASMJIT_ASSERT(static_cast(kX86InstOptionRex) == static_cast(kX86ByteRex)); // Check if one or more register operand is one of BPL, SPL, SIL, DIL and // force a REX prefix to be emitted in such case. @@ -1282,12 +1268,12 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co intptr_t offs = label->offset - (intptr_t)(cursor - self->_buffer); ASMJIT_ASSERT(offs <= 0); - EMIT_OP(opCode); + EMIT_BYTE(opCode); EMIT_DWORD(static_cast(offs - kRel32Size)); } else { // Non-bound label. 
- EMIT_OP(opCode); + EMIT_BYTE(opCode); dispOffset = -4; dispSize = 4; relocId = -1; @@ -1435,10 +1421,10 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co uint8_t imm8 = static_cast(imVal & 0xFF); if (imm8 == 0x03) { - EMIT_OP(opCode); + EMIT_BYTE(opCode); } else { - EMIT_OP(opCode + 1); + EMIT_BYTE(opCode + 1); EMIT_BYTE(imm8); } goto _EmitDone; @@ -1449,7 +1435,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co if (encoded == ENC_OPS(Label, None, None)) { label = self->getLabelData(static_cast(o0)->getId()); - if (self->hasFeature(kAssemblerFeaturePredictedJumps)) { + if (self->hasAsmOption(Assembler::kOptionPredictedJumps)) { if (options & kInstOptionTaken) EMIT_BYTE(0x3E); if (options & kInstOptionNotTaken) @@ -1465,7 +1451,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co ASMJIT_ASSERT(offs <= 0); if ((options & kInstOptionLongForm) == 0 && Utils::isInt8(offs - kRel8Size)) { - EMIT_OP(opCode); + EMIT_BYTE(opCode); EMIT_BYTE(offs - kRel8Size); options |= kInstOptionShortForm; @@ -1473,7 +1459,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co } else { EMIT_BYTE(0x0F); - EMIT_OP(opCode + 0x10); + EMIT_BYTE(opCode + 0x10); EMIT_DWORD(static_cast(offs - kRel32Size)); options &= ~kInstOptionShortForm; @@ -1483,7 +1469,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co else { // Non-bound label. 
if (options & kInstOptionShortForm) { - EMIT_OP(opCode); + EMIT_BYTE(opCode); dispOffset = -1; dispSize = 1; relocId = -1; @@ -1491,7 +1477,7 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co } else { EMIT_BYTE(0x0F); - EMIT_OP(opCode + 0x10); + EMIT_BYTE(opCode + 0x10); dispOffset = -4; dispSize = 4; relocId = -1; @@ -1868,9 +1854,9 @@ static ASMJIT_INLINE Error X86Assembler_emit(Assembler* self_, uint32_t code, co else { _GroupPop_Gp: // We allow 2 byte, 4 byte, and 8 byte register sizes, althought PUSH - // and POP only allows 2 bytes or register width. On 64-bit we simply + // and POP only allow 2 bytes or native size. On 64-bit we simply // PUSH/POP 64-bit register even if 32-bit register was given. - if (o0->getSize() < 1) + if (o0->getSize() < 2) goto _IllegalInst; opCode = extendedInfo.getSecondaryOpCode(); @@ -2359,7 +2345,7 @@ _EmitFpArith_Mem: } EMIT_BYTE(0x0F); - EMIT_OP(opCode); + EMIT_BYTE(opCode); EMIT_BYTE(0xC0 | (opReg << 3)); goto _EmitDone; @@ -3512,7 +3498,7 @@ _EmitX86Op: // Instruction opcodes. EMIT_MM(opCode); - EMIT_OP(opCode); + EMIT_BYTE(opCode); if (imLen != 0) goto _EmitImm; @@ -3542,7 +3528,7 @@ _EmitX86OpWithOpReg: // Instruction opcodes. opCode += opReg; EMIT_MM(opCode); - EMIT_OP(opCode); + EMIT_BYTE(opCode); if (imLen != 0) goto _EmitImm; @@ -3573,7 +3559,7 @@ _EmitX86R: // Instruction opcodes. EMIT_MM(opCode); - EMIT_OP(opCode); + EMIT_BYTE(opCode); // ModR. EMIT_BYTE(x86EncodeMod(3, opReg, static_cast(rmReg))); @@ -3585,7 +3571,7 @@ _EmitX86R: _EmitX86M: ASMJIT_ASSERT(rmMem != nullptr); - ASMJIT_ASSERT(rmMem->getOp() == kOperandTypeMem); + ASMJIT_ASSERT(rmMem->getOp() == Operand::kTypeMem); mBase = rmMem->getBase(); mIndex = rmMem->getIndex(); @@ -3632,7 +3618,7 @@ _EmitX86M: // Instruction opcodes. EMIT_MM(opCode); - EMIT_OP(opCode); + EMIT_BYTE(opCode); // ... Fall through ... 
// -------------------------------------------------------------------------- @@ -3724,7 +3710,7 @@ _EmitSib: else if (rmMem->getMemType() == kMemTypeLabel) { // Relative->Absolute [x86 mode]. label = self->getLabelData(rmMem->_vmem.base); - relocId = self->_relocList.getLength(); + relocId = self->_relocations.getLength(); RelocData rd; rd.type = kRelocRelToAbs; @@ -3732,12 +3718,12 @@ _EmitSib: rd.from = static_cast((uintptr_t)(cursor - self->_buffer)); rd.data = static_cast(dispOffset); - if (self->_relocList.append(rd) != kErrorOk) + if (self->_relocations.append(rd) != kErrorOk) return self->setLastError(kErrorNoHeapMemory); if (label->offset != -1) { // Bound label. - self->_relocList[relocId].data += static_cast(label->offset); + self->_relocations[relocId].data += static_cast(label->offset); EMIT_DWORD(0); } else { @@ -3749,7 +3735,7 @@ _EmitSib: } else { // RIP->Absolute [x86 mode]. - relocId = self->_relocList.getLength(); + relocId = self->_relocations.getLength(); RelocData rd; rd.type = kRelocRelToAbs; @@ -3757,7 +3743,7 @@ _EmitSib: rd.from = static_cast((uintptr_t)(cursor - self->_buffer)); rd.data = rd.from + static_cast(dispOffset); - if (self->_relocList.append(rd) != kErrorOk) + if (self->_relocations.append(rd) != kErrorOk) return self->setLastError(kErrorNoHeapMemory); EMIT_DWORD(0); @@ -3843,8 +3829,8 @@ _EmitFpuOp: EMIT_PP(opCode); // Instruction opcodes. 
- EMIT_OP(opCode >> 8); - EMIT_OP(opCode); + EMIT_BYTE(opCode >> 8); + EMIT_BYTE(opCode); goto _EmitDone; // -------------------------------------------------------------------------- @@ -3853,7 +3839,7 @@ _EmitFpuOp: #define EMIT_AVX_M \ ASMJIT_ASSERT(rmMem != nullptr); \ - ASMJIT_ASSERT(rmMem->getOp() == kOperandTypeMem); \ + ASMJIT_ASSERT(rmMem->getOp() == Operand::kTypeMem); \ \ if (rmMem->hasSegment()) { \ EMIT_BYTE(x86SegmentPrefix[rmMem->getSegment()]); \ @@ -3883,7 +3869,7 @@ _EmitFpuOp: EMIT_BYTE(kX86ByteVex3); \ EMIT_BYTE(vex_rxbmmmmm); \ EMIT_BYTE(vex_XvvvvLpp); \ - EMIT_OP(opCode); \ + EMIT_BYTE(opCode); \ } \ else { \ vex_XvvvvLpp |= static_cast(opReg << 4) & 0x80; \ @@ -3891,7 +3877,7 @@ _EmitFpuOp: \ EMIT_BYTE(kX86ByteVex2); \ EMIT_BYTE(vex_XvvvvLpp); \ - EMIT_OP(opCode); \ + EMIT_BYTE(opCode); \ } \ } \ \ @@ -3911,14 +3897,14 @@ _EmitAvxOp: uint32_t vex_rxbmmmmm = (opCode >> kX86InstOpCode_MM_Shift) | 0xE0; EMIT_BYTE(kX86ByteVex3); - EMIT_OP(vex_rxbmmmmm); - EMIT_OP(vex_XvvvvLpp); - EMIT_OP(opCode); + EMIT_BYTE(vex_rxbmmmmm); + EMIT_BYTE(vex_XvvvvLpp); + EMIT_BYTE(opCode); } else { EMIT_BYTE(kX86ByteVex2); - EMIT_OP(vex_XvvvvLpp); - EMIT_OP(opCode); + EMIT_BYTE(vex_XvvvvLpp); + EMIT_BYTE(opCode); } } goto _EmitDone; @@ -3942,9 +3928,9 @@ _EmitAvxR: vex_XvvvvLpp ^= 0x78; EMIT_BYTE(kX86ByteVex3); - EMIT_OP(vex_rxbmmmmm); - EMIT_OP(vex_XvvvvLpp); - EMIT_OP(opCode); + EMIT_BYTE(vex_rxbmmmmm); + EMIT_BYTE(vex_XvvvvLpp); + EMIT_BYTE(opCode); rmReg &= 0x07; } @@ -3953,8 +3939,8 @@ _EmitAvxR: vex_XvvvvLpp ^= 0xF8; EMIT_BYTE(kX86ByteVex2); - EMIT_OP(vex_XvvvvLpp); - EMIT_OP(opCode); + EMIT_BYTE(vex_XvvvvLpp); + EMIT_BYTE(opCode); } } @@ -4014,7 +4000,7 @@ _EmitAvxV: // Relative->Absolute [x86 mode]. 
label = self->getLabelData(rmMem->_vmem.base); - relocId = self->_relocList.getLength(); + relocId = self->_relocations.getLength(); { RelocData rd; @@ -4023,13 +4009,13 @@ _EmitAvxV: rd.from = static_cast((uintptr_t)(cursor - self->_buffer)); rd.data = static_cast(dispOffset); - if (self->_relocList.append(rd) != kErrorOk) + if (self->_relocations.append(rd) != kErrorOk) return self->setLastError(kErrorNoHeapMemory); } if (label->offset != -1) { // Bound label. - self->_relocList[relocId].data += static_cast(label->offset); + self->_relocations[relocId].data += static_cast(label->offset); EMIT_DWORD(0); } else { @@ -4052,7 +4038,7 @@ _EmitAvxV: #define EMIT_XOP_M \ ASMJIT_ASSERT(rmMem != nullptr); \ - ASMJIT_ASSERT(rmMem->getOp() == kOperandTypeMem); \ + ASMJIT_ASSERT(rmMem->getOp() == Operand::kTypeMem); \ \ if (rmMem->hasSegment()) { \ EMIT_BYTE(x86SegmentPrefix[rmMem->getSegment()]); \ @@ -4081,7 +4067,7 @@ _EmitAvxV: EMIT_BYTE(kX86ByteXop3); \ EMIT_BYTE(vex_rxbmmmmm); \ EMIT_BYTE(vex_XvvvvLpp); \ - EMIT_OP(opCode); \ + EMIT_BYTE(opCode); \ } \ \ mBase &= 0x07; \ @@ -4105,9 +4091,9 @@ _EmitXopR: xop_XvvvvLpp ^= 0x78; EMIT_BYTE(kX86ByteXop3); - EMIT_OP(xop_rxbmmmmm); - EMIT_OP(xop_XvvvvLpp); - EMIT_OP(opCode); + EMIT_BYTE(xop_rxbmmmmm); + EMIT_BYTE(xop_XvvvvLpp); + EMIT_BYTE(opCode); rmReg &= 0x07; } @@ -4168,10 +4154,10 @@ _EmitJmpOrCallAbs: // Both `jmp` and `call` instructions have a single-byte opcode and are // followed by a 32-bit displacement. - EMIT_OP(opCode); + EMIT_BYTE(opCode); EMIT_DWORD(0); - if (self->_relocList.append(rd) != kErrorOk) + if (self->_relocations.append(rd) != kErrorOk) return self->setLastError(kErrorNoHeapMemory); // Reserve space for a possible trampoline. 
@@ -4227,7 +4213,7 @@ _EmitDone: X86Assembler_dumpInstruction(sb, Arch, code, options, o0, o1, o2, o3, loggerOptions); - if ((loggerOptions & (1 << kLoggerOptionBinaryForm)) != 0) + if ((loggerOptions & Logger::kOptionBinaryForm) != 0) LogUtil::formatLine(sb, self->_cursor, (intptr_t)(cursor - self->_cursor), dispSize, imLen, self->_comment); else LogUtil::formatLine(sb, nullptr, kInvalidIndex, 0, 0, self->_comment); @@ -4235,7 +4221,7 @@ _EmitDone: # if defined(ASMJIT_DEBUG) if (self->_logger) # endif // ASMJIT_DEBUG - self->_logger->logString(kLoggerStyleDefault, sb.getData(), sb.getLength()); + self->_logger->logString(Logger::kStyleDefault, sb.getData(), sb.getLength()); # if defined(ASMJIT_DEBUG) // This shouldn't happen. @@ -4257,8 +4243,10 @@ _EmitDone: Error X86Assembler::_emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { #if defined(ASMJIT_BUILD_X86) && !defined(ASMJIT_BUILD_X64) + ASMJIT_ASSERT(_arch == kArchX86); return X86Assembler_emit(this, code, &o0, &o1, &o2, &o3); #elif !defined(ASMJIT_BUILD_X86) && defined(ASMJIT_BUILD_X64) + ASMJIT_ASSERT(_arch == kArchX64); return X86Assembler_emit(this, code, &o0, &o1, &o2, &o3); #else if (_arch == kArchX86) diff --git a/src/asmjit/x86/x86assembler.h b/src/asmjit/x86/x86assembler.h index 21d45c5..b7baed0 100644 --- a/src/asmjit/x86/x86assembler.h +++ b/src/asmjit/x86/x86assembler.h @@ -162,10 +162,10 @@ namespace asmjit { //! a.ret(); //! ~~~ //! -//! You can see that syntax is very close to Intel one. Only difference is that -//! you are calling functions that emit binary code for you. All registers are -//! in `asmjit::x86` namespace, so it's very comfortable to use it (look at the -//! `use namespace` section). Without importing `asmjit::x86` registers would +//! You can see that syntax is very close to the Intel one. Only difference is +//! that you are calling functions that emit binary code for you. All registers +//! 
are in `asmjit::x86` namespace, so it's very comfortable to use it (look at +//! the `use namespace` section). Without importing `asmjit::x86` registers would //! have to be written as `x86::eax`, `x86::esp`, and so on. //! //! There is also possibility to use memory addresses and immediates. Use @@ -278,7 +278,7 @@ namespace asmjit { //! code with labels. Labels are fully supported and you can call `jmp` or //! `je` (and similar) instructions to initialized or yet uninitialized label. //! Each label expects to be bound into offset. To bind label to specific -//! offset, use `CodeGen::bind()` method. +//! offset, use `Assembler::bind()` function. //! //! See next example that contains complete code that creates simple memory //! copy function (in DWord entities). @@ -292,14 +292,14 @@ namespace asmjit { //! //! // Assembler instance. //! JitRuntime runtime; -//! Assembler a(&runtime); +//! X86Assembler a(&runtime); //! //! // Constants. //! const int arg_offset = 8; // Arguments offset (STDCALL EBP). //! const int arg_size = 12; // Arguments size. //! //! // Labels. -//! Label L_Loop(a); +//! Label L_Loop = a.newLabel(); //! //! // Prolog. //! 
a.push(ebp); @@ -491,13 +491,13 @@ struct ASMJIT_VIRTAPI X86Assembler : public Assembler { // [Align] // -------------------------------------------------------------------------- - ASMJIT_API virtual Error align(uint32_t alignMode, uint32_t offset); + ASMJIT_API virtual Error align(uint32_t alignMode, uint32_t offset) noexcept; // -------------------------------------------------------------------------- // [Reloc] // -------------------------------------------------------------------------- - ASMJIT_API virtual size_t _relocCode(void* dst, Ptr baseAddress) const; + ASMJIT_API virtual size_t _relocCode(void* dst, Ptr baseAddress) const noexcept; // -------------------------------------------------------------------------- // [Emit] @@ -539,164 +539,164 @@ struct ASMJIT_VIRTAPI X86Assembler : public Assembler { // [Emit] // -------------------------------------------------------------------------- -#define INST_0x(_Inst_, _Code_) \ - ASMJIT_INLINE Error _Inst_() { \ - return emit(_Code_); \ +#define INST_0x(inst, code) \ + ASMJIT_INLINE Error inst() { \ + return emit(code); \ } -#define INST_1x(_Inst_, _Code_, _Op0_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0) { \ - return emit(_Code_, o0); \ +#define INST_1x(inst, code, T0) \ + ASMJIT_INLINE Error inst(const T0& o0) { \ + return emit(code, o0); \ } -#define INST_1i(_Inst_, _Code_, _Op0_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0) { return emit(_Code_, o0); } \ +#define INST_1i(inst, code, T0) \ + ASMJIT_INLINE Error inst(const T0& o0) { return emit(code, o0); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(int o0) { return emit(_Code_, Utils::asInt(o0)); } \ + ASMJIT_INLINE Error inst(int o0) { return emit(code, Utils::asInt(o0)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(unsigned int o0) { return emit(_Code_, Utils::asInt(o0)); } \ + ASMJIT_INLINE Error inst(unsigned int o0) { return emit(code, Utils::asInt(o0)); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(int64_t o0) { return emit(_Code_, Utils::asInt(o0)); } \ + ASMJIT_INLINE Error inst(int64_t o0) { return emit(code, Utils::asInt(o0)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(uint64_t o0) { return emit(_Code_, Utils::asInt(o0)); } + ASMJIT_INLINE Error inst(uint64_t o0) { return emit(code, Utils::asInt(o0)); } -#define INST_1cc(_Inst_, _Code_, _Translate_, _Op0_) \ - ASMJIT_INLINE Error _Inst_(uint32_t cc, const _Op0_& o0) { \ +#define INST_1cc(inst, code, _Translate_, T0) \ + ASMJIT_INLINE Error inst(uint32_t cc, const T0& o0) { \ return emit(_Translate_(cc), o0); \ } \ \ - ASMJIT_INLINE Error _Inst_##a(const _Op0_& o0) { return emit(_Code_##a, o0); } \ - ASMJIT_INLINE Error _Inst_##ae(const _Op0_& o0) { return emit(_Code_##ae, o0); } \ - ASMJIT_INLINE Error _Inst_##b(const _Op0_& o0) { return emit(_Code_##b, o0); } \ - ASMJIT_INLINE Error _Inst_##be(const _Op0_& o0) { return emit(_Code_##be, o0); } \ - ASMJIT_INLINE Error _Inst_##c(const _Op0_& o0) { return emit(_Code_##c, o0); } \ - ASMJIT_INLINE Error _Inst_##e(const _Op0_& o0) { return emit(_Code_##e, o0); } \ - ASMJIT_INLINE Error _Inst_##g(const _Op0_& o0) { return emit(_Code_##g, o0); } \ - ASMJIT_INLINE Error _Inst_##ge(const _Op0_& o0) { return emit(_Code_##ge, o0); } \ - ASMJIT_INLINE Error _Inst_##l(const _Op0_& o0) { return emit(_Code_##l, o0); } \ - ASMJIT_INLINE Error _Inst_##le(const _Op0_& o0) { return emit(_Code_##le, o0); } \ - ASMJIT_INLINE Error _Inst_##na(const _Op0_& o0) { return emit(_Code_##na, o0); } \ - ASMJIT_INLINE Error _Inst_##nae(const _Op0_& o0) { return emit(_Code_##nae, o0); } \ - ASMJIT_INLINE Error _Inst_##nb(const _Op0_& o0) { return emit(_Code_##nb, o0); } \ - ASMJIT_INLINE Error _Inst_##nbe(const _Op0_& o0) { return emit(_Code_##nbe, o0); } \ - ASMJIT_INLINE Error _Inst_##nc(const _Op0_& o0) { return emit(_Code_##nc, o0); } \ - ASMJIT_INLINE Error _Inst_##ne(const _Op0_& o0) { return emit(_Code_##ne, o0); } \ - 
ASMJIT_INLINE Error _Inst_##ng(const _Op0_& o0) { return emit(_Code_##ng, o0); } \ - ASMJIT_INLINE Error _Inst_##nge(const _Op0_& o0) { return emit(_Code_##nge, o0); } \ - ASMJIT_INLINE Error _Inst_##nl(const _Op0_& o0) { return emit(_Code_##nl, o0); } \ - ASMJIT_INLINE Error _Inst_##nle(const _Op0_& o0) { return emit(_Code_##nle, o0); } \ - ASMJIT_INLINE Error _Inst_##no(const _Op0_& o0) { return emit(_Code_##no, o0); } \ - ASMJIT_INLINE Error _Inst_##np(const _Op0_& o0) { return emit(_Code_##np, o0); } \ - ASMJIT_INLINE Error _Inst_##ns(const _Op0_& o0) { return emit(_Code_##ns, o0); } \ - ASMJIT_INLINE Error _Inst_##nz(const _Op0_& o0) { return emit(_Code_##nz, o0); } \ - ASMJIT_INLINE Error _Inst_##o(const _Op0_& o0) { return emit(_Code_##o, o0); } \ - ASMJIT_INLINE Error _Inst_##p(const _Op0_& o0) { return emit(_Code_##p, o0); } \ - ASMJIT_INLINE Error _Inst_##pe(const _Op0_& o0) { return emit(_Code_##pe, o0); } \ - ASMJIT_INLINE Error _Inst_##po(const _Op0_& o0) { return emit(_Code_##po, o0); } \ - ASMJIT_INLINE Error _Inst_##s(const _Op0_& o0) { return emit(_Code_##s, o0); } \ - ASMJIT_INLINE Error _Inst_##z(const _Op0_& o0) { return emit(_Code_##z, o0); } + ASMJIT_INLINE Error inst##a(const T0& o0) { return emit(code##a, o0); } \ + ASMJIT_INLINE Error inst##ae(const T0& o0) { return emit(code##ae, o0); } \ + ASMJIT_INLINE Error inst##b(const T0& o0) { return emit(code##b, o0); } \ + ASMJIT_INLINE Error inst##be(const T0& o0) { return emit(code##be, o0); } \ + ASMJIT_INLINE Error inst##c(const T0& o0) { return emit(code##c, o0); } \ + ASMJIT_INLINE Error inst##e(const T0& o0) { return emit(code##e, o0); } \ + ASMJIT_INLINE Error inst##g(const T0& o0) { return emit(code##g, o0); } \ + ASMJIT_INLINE Error inst##ge(const T0& o0) { return emit(code##ge, o0); } \ + ASMJIT_INLINE Error inst##l(const T0& o0) { return emit(code##l, o0); } \ + ASMJIT_INLINE Error inst##le(const T0& o0) { return emit(code##le, o0); } \ + ASMJIT_INLINE Error inst##na(const T0& o0) { 
return emit(code##na, o0); } \ + ASMJIT_INLINE Error inst##nae(const T0& o0) { return emit(code##nae, o0); } \ + ASMJIT_INLINE Error inst##nb(const T0& o0) { return emit(code##nb, o0); } \ + ASMJIT_INLINE Error inst##nbe(const T0& o0) { return emit(code##nbe, o0); } \ + ASMJIT_INLINE Error inst##nc(const T0& o0) { return emit(code##nc, o0); } \ + ASMJIT_INLINE Error inst##ne(const T0& o0) { return emit(code##ne, o0); } \ + ASMJIT_INLINE Error inst##ng(const T0& o0) { return emit(code##ng, o0); } \ + ASMJIT_INLINE Error inst##nge(const T0& o0) { return emit(code##nge, o0); } \ + ASMJIT_INLINE Error inst##nl(const T0& o0) { return emit(code##nl, o0); } \ + ASMJIT_INLINE Error inst##nle(const T0& o0) { return emit(code##nle, o0); } \ + ASMJIT_INLINE Error inst##no(const T0& o0) { return emit(code##no, o0); } \ + ASMJIT_INLINE Error inst##np(const T0& o0) { return emit(code##np, o0); } \ + ASMJIT_INLINE Error inst##ns(const T0& o0) { return emit(code##ns, o0); } \ + ASMJIT_INLINE Error inst##nz(const T0& o0) { return emit(code##nz, o0); } \ + ASMJIT_INLINE Error inst##o(const T0& o0) { return emit(code##o, o0); } \ + ASMJIT_INLINE Error inst##p(const T0& o0) { return emit(code##p, o0); } \ + ASMJIT_INLINE Error inst##pe(const T0& o0) { return emit(code##pe, o0); } \ + ASMJIT_INLINE Error inst##po(const T0& o0) { return emit(code##po, o0); } \ + ASMJIT_INLINE Error inst##s(const T0& o0) { return emit(code##s, o0); } \ + ASMJIT_INLINE Error inst##z(const T0& o0) { return emit(code##z, o0); } -#define INST_2x(_Inst_, _Code_, _Op0_, _Op1_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1) { \ - return emit(_Code_, o0, o1); \ +#define INST_2x(inst, code, T0, T1) \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1) { \ + return emit(code, o0, o1); \ } -#define INST_2i(_Inst_, _Code_, _Op0_, _Op1_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_, o0, o1); } \ +#define INST_2i(inst, code, T0, T1) \ + ASMJIT_INLINE Error 
inst(const T0& o0, const T1& o1) { return emit(code, o0, o1); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \ + ASMJIT_INLINE Error inst(const T0& o0, int o1) { return emit(code, o0, Utils::asInt(o1)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \ + ASMJIT_INLINE Error inst(const T0& o0, unsigned int o1) { return emit(code, o0, Utils::asInt(o1)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1) { return emit(_Code_, o0, Utils::asInt(o1)); } \ + ASMJIT_INLINE Error inst(const T0& o0, int64_t o1) { return emit(code, o0, Utils::asInt(o1)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1) { return emit(_Code_, o0, Utils::asInt(o1)); } + ASMJIT_INLINE Error inst(const T0& o0, uint64_t o1) { return emit(code, o0, Utils::asInt(o1)); } -#define INST_2cc(_Inst_, _Code_, _Translate_, _Op0_, _Op1_) \ - ASMJIT_INLINE Error _Inst_(uint32_t cc, const _Op0_& o0, const _Op1_& o1) { \ +#define INST_2cc(inst, code, _Translate_, T0, T1) \ + ASMJIT_INLINE Error inst(uint32_t cc, const T0& o0, const T1& o1) { \ return emit(_Translate_(cc), o0, o1); \ } \ \ - ASMJIT_INLINE Error _Inst_##a(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##a, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##ae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ae, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##b(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##b, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##be(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##be, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##c(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##c, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##e(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##e, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##g(const _Op0_& o0, const _Op1_& o1) { return 
emit(_Code_##g, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##ge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ge, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##l(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##l, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##le(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##le, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##na(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##na, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nae, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nb(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nb, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nbe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nbe, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nc(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nc, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##ne(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ne, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##ng(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ng, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nge, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nl(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nl, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nle(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nle, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##no(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##no, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##np(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##np, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##ns(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ns, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##nz(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nz, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##o(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##o, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##p(const _Op0_& o0, const 
_Op1_& o1) { return emit(_Code_##p, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##pe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##pe, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##po(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##po, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##s(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##s, o0, o1); } \ - ASMJIT_INLINE Error _Inst_##z(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##z, o0, o1); } + ASMJIT_INLINE Error inst##a(const T0& o0, const T1& o1) { return emit(code##a, o0, o1); } \ + ASMJIT_INLINE Error inst##ae(const T0& o0, const T1& o1) { return emit(code##ae, o0, o1); } \ + ASMJIT_INLINE Error inst##b(const T0& o0, const T1& o1) { return emit(code##b, o0, o1); } \ + ASMJIT_INLINE Error inst##be(const T0& o0, const T1& o1) { return emit(code##be, o0, o1); } \ + ASMJIT_INLINE Error inst##c(const T0& o0, const T1& o1) { return emit(code##c, o0, o1); } \ + ASMJIT_INLINE Error inst##e(const T0& o0, const T1& o1) { return emit(code##e, o0, o1); } \ + ASMJIT_INLINE Error inst##g(const T0& o0, const T1& o1) { return emit(code##g, o0, o1); } \ + ASMJIT_INLINE Error inst##ge(const T0& o0, const T1& o1) { return emit(code##ge, o0, o1); } \ + ASMJIT_INLINE Error inst##l(const T0& o0, const T1& o1) { return emit(code##l, o0, o1); } \ + ASMJIT_INLINE Error inst##le(const T0& o0, const T1& o1) { return emit(code##le, o0, o1); } \ + ASMJIT_INLINE Error inst##na(const T0& o0, const T1& o1) { return emit(code##na, o0, o1); } \ + ASMJIT_INLINE Error inst##nae(const T0& o0, const T1& o1) { return emit(code##nae, o0, o1); } \ + ASMJIT_INLINE Error inst##nb(const T0& o0, const T1& o1) { return emit(code##nb, o0, o1); } \ + ASMJIT_INLINE Error inst##nbe(const T0& o0, const T1& o1) { return emit(code##nbe, o0, o1); } \ + ASMJIT_INLINE Error inst##nc(const T0& o0, const T1& o1) { return emit(code##nc, o0, o1); } \ + ASMJIT_INLINE Error inst##ne(const T0& o0, const T1& o1) { return emit(code##ne, o0, o1); } \ + 
ASMJIT_INLINE Error inst##ng(const T0& o0, const T1& o1) { return emit(code##ng, o0, o1); } \ + ASMJIT_INLINE Error inst##nge(const T0& o0, const T1& o1) { return emit(code##nge, o0, o1); } \ + ASMJIT_INLINE Error inst##nl(const T0& o0, const T1& o1) { return emit(code##nl, o0, o1); } \ + ASMJIT_INLINE Error inst##nle(const T0& o0, const T1& o1) { return emit(code##nle, o0, o1); } \ + ASMJIT_INLINE Error inst##no(const T0& o0, const T1& o1) { return emit(code##no, o0, o1); } \ + ASMJIT_INLINE Error inst##np(const T0& o0, const T1& o1) { return emit(code##np, o0, o1); } \ + ASMJIT_INLINE Error inst##ns(const T0& o0, const T1& o1) { return emit(code##ns, o0, o1); } \ + ASMJIT_INLINE Error inst##nz(const T0& o0, const T1& o1) { return emit(code##nz, o0, o1); } \ + ASMJIT_INLINE Error inst##o(const T0& o0, const T1& o1) { return emit(code##o, o0, o1); } \ + ASMJIT_INLINE Error inst##p(const T0& o0, const T1& o1) { return emit(code##p, o0, o1); } \ + ASMJIT_INLINE Error inst##pe(const T0& o0, const T1& o1) { return emit(code##pe, o0, o1); } \ + ASMJIT_INLINE Error inst##po(const T0& o0, const T1& o1) { return emit(code##po, o0, o1); } \ + ASMJIT_INLINE Error inst##s(const T0& o0, const T1& o1) { return emit(code##s, o0, o1); } \ + ASMJIT_INLINE Error inst##z(const T0& o0, const T1& o1) { return emit(code##z, o0, o1); } -#define INST_3x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); } +#define INST_3x(inst, code, T0, T1, T2) \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2) { return emit(code, o0, o1, o2); } -#define INST_3i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); } \ +#define INST_3i(inst, code, T0, T1, T2) \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2) { return emit(code, o0, o1, o2); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int o2) { return emit(code, o0, o1, Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, unsigned int o2) { return emit(code, o0, o1, Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int64_t o2) { return emit(code, o0, o1, Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) { return emit(_Code_, o0, o1, Utils::asInt(o2)); } + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, uint64_t o2) { return emit(code, o0, o1, Utils::asInt(o2)); } -#define INST_3ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { return emit(_Code_, o0, o1, o2); } \ +#define INST_3ii(inst, code, T0, T1, T2) \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2) { return emit(code, o0, o1, o2); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int o1, int o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \ + ASMJIT_INLINE Error inst(const T0& o0, int o1, int o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \ + ASMJIT_INLINE Error inst(const T0& o0, unsigned int o1, unsigned int o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } \ + ASMJIT_INLINE Error inst(const T0& o0, int64_t o1, int64_t o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) { return emit(_Code_, o0, Imm(o1), Utils::asInt(o2)); } + ASMJIT_INLINE Error inst(const T0& o0, uint64_t o1, uint64_t o2) { return emit(code, o0, Imm(o1), Utils::asInt(o2)); } -#define INST_4x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } +#define INST_4x(inst, code, T0, T1, T2, T3) \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return emit(code, o0, o1, o2, o3); } -#define INST_4i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } \ +#define INST_4i(inst, code, T0, T1, T2, T3) \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return emit(code, o0, o1, o2, o3); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, int o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, unsigned int o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, int64_t o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) { return emit(_Code_, o0, o1, o2, Utils::asInt(o3)); } + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, uint64_t o3) { return emit(code, o0, o1, o2, Utils::asInt(o3)); } -#define INST_4ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { return emit(_Code_, o0, o1, o2, o3); } \ +#define INST_4ii(inst, code, T0, T1, T2, T3) \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, const T2& o2, const T3& o3) { return emit(code, o0, o1, o2, o3); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int o2, int o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, unsigned int o2, unsigned int o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); } \ /*! \overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } \ + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, int64_t o2, int64_t o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); } \ /*! 
\overload */ \ - ASMJIT_INLINE Error _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) { return emit(_Code_, o0, o1, Imm(o2), Utils::asInt(o3)); } + ASMJIT_INLINE Error inst(const T0& o0, const T1& o1, uint64_t o2, uint64_t o3) { return emit(code, o0, o1, Imm(o2), Utils::asInt(o3)); } // -------------------------------------------------------------------------- // [X86/X64] diff --git a/src/asmjit/x86/x86compiler.cpp b/src/asmjit/x86/x86compiler.cpp index 6378d6e..01b2507 100644 --- a/src/asmjit/x86/x86compiler.cpp +++ b/src/asmjit/x86/x86compiler.cpp @@ -43,39 +43,35 @@ namespace asmjit { // [asmjit::X86VarInfo] // ============================================================================ -#define C(_Class_) kX86RegClass##_Class_ -#define D(_Desc_) kVarFlag##_Desc_ - -const X86VarInfo _x86VarInfo[] = { - /* 00: kVarTypeInt8 */ { kX86RegTypeGpbLo, 1 , C(Gp) , 0 , "gpb" }, - /* 01: kVarTypeUInt8 */ { kX86RegTypeGpbLo, 1 , C(Gp) , 0 , "gpb" }, - /* 02: kVarTypeInt16 */ { kX86RegTypeGpw , 2 , C(Gp) , 0 , "gpw" }, - /* 03: kVarTypeUInt16 */ { kX86RegTypeGpw , 2 , C(Gp) , 0 , "gpw" }, - /* 04: kVarTypeInt32 */ { kX86RegTypeGpd , 4 , C(Gp) , 0 , "gpd" }, - /* 05: kVarTypeUInt32 */ { kX86RegTypeGpd , 4 , C(Gp) , 0 , "gpd" }, - /* 06: kVarTypeInt64 */ { kX86RegTypeGpq , 8 , C(Gp) , 0 , "gpq" }, - /* 07: kVarTypeUInt64 */ { kX86RegTypeGpq , 8 , C(Gp) , 0 , "gpq" }, - /* 08: kVarTypeIntPtr */ { 0 , 0 , C(Gp) , 0 , "" }, // Remapped. - /* 09: kVarTypeUIntPtr */ { 0 , 0 , C(Gp) , 0 , "" }, // Remapped. 
- /* 10: kVarTypeFp32 */ { kX86RegTypeFp , 4 , C(Fp) , D(Sp) , "fp" }, - /* 11: kVarTypeFp64 */ { kX86RegTypeFp , 8 , C(Fp) , D(Dp) , "fp" }, - /* 12: kX86VarTypeMm */ { kX86RegTypeMm , 8 , C(Mm) , 0 , "mm" }, - /* 13: kX86VarTypeK */ { kX86RegTypeK , 8 , C(K) , 0 , "k" }, - /* 14: kX86VarTypeXmm */ { kX86RegTypeXmm , 16, C(Xyz), 0 , "xmm" }, - /* 15: kX86VarTypeXmmSs */ { kX86RegTypeXmm , 4 , C(Xyz), D(Sp) , "xmm" }, - /* 16: kX86VarTypeXmmPs */ { kX86RegTypeXmm , 16, C(Xyz), D(Sp) | D(Packed), "xmm" }, - /* 17: kX86VarTypeXmmSd */ { kX86RegTypeXmm , 8 , C(Xyz), D(Dp) , "xmm" }, - /* 18: kX86VarTypeXmmPd */ { kX86RegTypeXmm , 16, C(Xyz), D(Dp) | D(Packed), "xmm" }, - /* 19: kX86VarTypeYmm */ { kX86RegTypeYmm , 32, C(Xyz), 0 , "ymm" }, - /* 20: kX86VarTypeYmmPs */ { kX86RegTypeYmm , 32, C(Xyz), D(Sp) | D(Packed), "ymm" }, - /* 21: kX86VarTypeYmmPd */ { kX86RegTypeYmm , 32, C(Xyz), D(Dp) | D(Packed), "ymm" }, - /* 22: kX86VarTypeZmm */ { kX86RegTypeZmm , 64, C(Xyz), 0 , "zmm" }, - /* 23: kX86VarTypeZmmPs */ { kX86RegTypeZmm , 64, C(Xyz), D(Sp) | D(Packed), "zmm" }, - /* 24: kX86VarTypeZmmPd */ { kX86RegTypeZmm , 64, C(Xyz), D(Dp) | D(Packed), "zmm" } +#define F(flag) VarInfo::kFlag##flag +const VarInfo _x86VarInfo[] = { + { kVarTypeInt8 , 1 , kX86RegClassGp , kX86RegTypeGpbLo, 0 , "gpb" }, + { kVarTypeUInt8 , 1 , kX86RegClassGp , kX86RegTypeGpbLo, 0 , "gpb" }, + { kVarTypeInt16 , 2 , kX86RegClassGp , kX86RegTypeGpw , 0 , "gpw" }, + { kVarTypeUInt16 , 2 , kX86RegClassGp , kX86RegTypeGpw , 0 , "gpw" }, + { kVarTypeInt32 , 4 , kX86RegClassGp , kX86RegTypeGpd , 0 , "gpd" }, + { kVarTypeUInt32 , 4 , kX86RegClassGp , kX86RegTypeGpd , 0 , "gpd" }, + { kVarTypeInt64 , 8 , kX86RegClassGp , kX86RegTypeGpq , 0 , "gpq" }, + { kVarTypeUInt64 , 8 , kX86RegClassGp , kX86RegTypeGpq , 0 , "gpq" }, + { kVarTypeIntPtr , 0 , kX86RegClassGp , 0 , 0 , "" }, // Abstract. + { kVarTypeUIntPtr , 0 , kX86RegClassGp , 0 , 0 , "" }, // Abstract. 
+ { kVarTypeFp32 , 4 , kX86RegClassFp , kX86RegTypeFp , F(SP) , "fp" }, + { kVarTypeFp64 , 8 , kX86RegClassFp , kX86RegTypeFp , F(DP) , "fp" }, + { kX86VarTypeMm , 8 , kX86RegClassMm , kX86RegTypeMm , 0 | F(SIMD), "mm" }, + { kX86VarTypeK , 8 , kX86RegClassK , kX86RegTypeK , 0 , "k" }, + { kX86VarTypeXmm , 16, kX86RegClassXyz, kX86RegTypeXmm , 0 | F(SIMD), "xmm" }, + { kX86VarTypeXmmSs, 4 , kX86RegClassXyz, kX86RegTypeXmm , F(SP) , "xmm" }, + { kX86VarTypeXmmPs, 16, kX86RegClassXyz, kX86RegTypeXmm , F(SP) | F(SIMD), "xmm" }, + { kX86VarTypeXmmSd, 8 , kX86RegClassXyz, kX86RegTypeXmm , F(DP) , "xmm" }, + { kX86VarTypeXmmPd, 16, kX86RegClassXyz, kX86RegTypeXmm , F(DP) | F(SIMD), "xmm" }, + { kX86VarTypeYmm , 32, kX86RegClassXyz, kX86RegTypeYmm , 0 | F(SIMD), "ymm" }, + { kX86VarTypeYmmPs, 32, kX86RegClassXyz, kX86RegTypeYmm , F(SP) | F(SIMD), "ymm" }, + { kX86VarTypeYmmPd, 32, kX86RegClassXyz, kX86RegTypeYmm , F(DP) | F(SIMD), "ymm" }, + { kX86VarTypeZmm , 64, kX86RegClassXyz, kX86RegTypeZmm , 0 | F(SIMD), "zmm" }, + { kX86VarTypeZmmPs, 64, kX86RegClassXyz, kX86RegTypeZmm , F(SP) | F(SIMD), "zmm" }, + { kX86VarTypeZmmPd, 64, kX86RegClassXyz, kX86RegTypeZmm , F(DP) | F(SIMD), "zmm" } }; - -#undef D -#undef C +#undef F #if defined(ASMJIT_BUILD_X86) const uint8_t _x86VarMapping[kX86VarTypeCount] = { @@ -141,7 +137,7 @@ const uint8_t _x64VarMapping[kX86VarTypeCount] = { // [asmjit::X86CallNode - Arg / Ret] // ============================================================================ -bool X86CallNode::_setArg(uint32_t i, const Operand& op) { +bool X86CallNode::_setArg(uint32_t i, const Operand& op) noexcept { if ((i & ~kFuncArgHi) >= _x86Decl.getNumArgs()) return false; @@ -149,7 +145,7 @@ bool X86CallNode::_setArg(uint32_t i, const Operand& op) { return true; } -bool X86CallNode::_setRet(uint32_t i, const Operand& op) { +bool X86CallNode::_setRet(uint32_t i, const Operand& op) noexcept { if (i >= 2) return false; @@ -161,7 +157,7 @@ bool X86CallNode::_setRet(uint32_t 
i, const Operand& op) { // [asmjit::X86Compiler - Construction / Destruction] // ============================================================================ -X86Compiler::X86Compiler(X86Assembler* assembler) +X86Compiler::X86Compiler(X86Assembler* assembler) noexcept : Compiler(), zax(NoInit), zcx(NoInit), @@ -186,7 +182,7 @@ X86Compiler::X86Compiler(X86Assembler* assembler) attach(assembler); } -X86Compiler::~X86Compiler() { +X86Compiler::~X86Compiler() noexcept { reset(true); } @@ -194,7 +190,7 @@ X86Compiler::~X86Compiler() { // [asmjit::X86Compiler - Attach / Reset] // ============================================================================ -Error X86Compiler::attach(Assembler* assembler) { +Error X86Compiler::attach(Assembler* assembler) noexcept { ASMJIT_ASSERT(assembler != nullptr); if (_assembler != nullptr) @@ -224,7 +220,6 @@ Error X86Compiler::attach(Assembler* assembler) { _regSize = static_cast(assembler->getRegSize()); _regCount = static_cast(assembler)->getRegCount(); _finalized = false; - _lastError = kErrorOk; zax = static_cast(assembler)->zax; zcx = static_cast(assembler)->zcx; @@ -238,7 +233,7 @@ Error X86Compiler::attach(Assembler* assembler) { return kErrorOk; } -void X86Compiler::reset(bool releaseMemory) { +void X86Compiler::reset(bool releaseMemory) noexcept { Compiler::reset(releaseMemory); _regCount.reset(); @@ -256,7 +251,7 @@ void X86Compiler::reset(bool releaseMemory) { // [asmjit::X86Compiler - Finalize] // ============================================================================ -Error X86Compiler::finalize() { +Error X86Compiler::finalize() noexcept { X86Assembler* assembler = getAssembler(); if (assembler == nullptr) return kErrorOk; @@ -283,7 +278,7 @@ Error X86Compiler::finalize() { start = node; _resetTokenGenerator(); - if (node->getType() == kHLNodeTypeFunc) { + if (node->getType() == HLNode::kTypeFunc) { node = static_cast(start)->getEnd(); error = context.compile(static_cast(start)); @@ -293,7 +288,7 @@ Error 
X86Compiler::finalize() { do { node = node->getNext(); - } while (node != nullptr && node->getType() != kHLNodeTypeFunc); + } while (node != nullptr && node->getType() != HLNode::kTypeFunc); error = context.serialize(assembler, start, node); context.cleanup(); @@ -311,11 +306,11 @@ Error X86Compiler::finalize() { // ============================================================================ //! Get compiler instruction item size without operands assigned. -static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) { +static ASMJIT_INLINE size_t X86Compiler_getInstSize(uint32_t code) noexcept { return Utils::inInterval(code, _kX86InstIdJbegin, _kX86InstIdJend) ? sizeof(HLJump) : sizeof(HLInst); } -static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) { +static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, uint32_t options, Operand* opList, uint32_t opCount) noexcept { if (Utils::inInterval(code, _kX86InstIdJbegin, _kX86InstIdJend)) { HLJump* node = new(p) HLJump(self, code, options, opList, opCount); HLLabel* jTarget = nullptr; @@ -327,7 +322,7 @@ static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, ui options |= kInstOptionUnfollow; } - node->orFlags(code == kX86InstIdJmp ? kHLNodeFlagIsJmp | kHLNodeFlagIsTaken : kHLNodeFlagIsJcc); + node->orFlags(code == kX86InstIdJmp ? HLNode::kFlagIsJmp | HLNode::kFlagIsTaken : HLNode::kFlagIsJcc); node->_target = jTarget; node->_jumpNext = nullptr; @@ -339,9 +334,9 @@ static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, ui // The 'jmp' is always taken, conditional jump can contain hint, we detect it. 
if (code == kX86InstIdJmp) - node->orFlags(kHLNodeFlagIsTaken); + node->orFlags(HLNode::kFlagIsTaken); else if (options & kInstOptionTaken) - node->orFlags(kHLNodeFlagIsTaken); + node->orFlags(HLNode::kFlagIsTaken); node->addOptions(options); return node; @@ -353,7 +348,7 @@ static HLInst* X86Compiler_newInst(X86Compiler* self, void* p, uint32_t code, ui } } -HLInst* X86Compiler::newInst(uint32_t code) { +HLInst* X86Compiler::newInst(uint32_t code) noexcept { size_t size = X86Compiler_getInstSize(code); HLInst* inst = static_cast(_zoneAllocator.alloc(size)); @@ -367,7 +362,7 @@ _NoMemory: return nullptr; } -HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0) noexcept { size_t size = X86Compiler_getInstSize(code); HLInst* inst = static_cast(_zoneAllocator.alloc(size + 1 * sizeof(Operand))); @@ -386,7 +381,7 @@ _NoMemory: return nullptr; } -HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1) noexcept { size_t size = X86Compiler_getInstSize(code); HLInst* inst = static_cast(_zoneAllocator.alloc(size + 2 * sizeof(Operand))); @@ -407,7 +402,7 @@ _NoMemory: return nullptr; } -HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept { size_t size = X86Compiler_getInstSize(code); HLInst* inst = static_cast(_zoneAllocator.alloc(size + 3 * sizeof(Operand))); @@ -430,7 +425,7 @@ _NoMemory: return nullptr; } -HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept { size_t size = X86Compiler_getInstSize(code); HLInst* inst = 
static_cast(_zoneAllocator.alloc(size + 4 * sizeof(Operand))); @@ -455,7 +450,7 @@ _NoMemory: return nullptr; } -HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { +HLInst* X86Compiler::newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept { size_t size = X86Compiler_getInstSize(code); HLInst* inst = static_cast(_zoneAllocator.alloc(size + 5 * sizeof(Operand))); @@ -482,49 +477,49 @@ _NoMemory: return nullptr; } -HLInst* X86Compiler::emit(uint32_t code) { +HLInst* X86Compiler::emit(uint32_t code) noexcept { HLInst* node = newInst(code); if (node == nullptr) return nullptr; return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0) noexcept { HLInst* node = newInst(code, o0); if (node == nullptr) return nullptr; return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1){ +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1) noexcept { HLInst* node = newInst(code, o0, o1); if (node == nullptr) return nullptr; return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept { HLInst* node = newInst(code, o0, o1, o2); if (node == nullptr) return nullptr; return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3){ +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept { HLInst* node = newInst(code, o0, o1, o2, o3); if (node == nullptr) return nullptr; return static_cast(addNode(node)); 
} -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept { HLInst* node = newInst(code, o0, o1, o2, o3, o4); if (node == nullptr) return nullptr; return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, int o0_) { +HLInst* X86Compiler::emit(uint32_t code, int o0_) noexcept { Imm o0(o0_); HLInst* node = newInst(code, o0); if (node == nullptr) @@ -532,7 +527,7 @@ HLInst* X86Compiler::emit(uint32_t code, int o0_) { return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) { +HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) noexcept { Imm o0(o0_); HLInst* node = newInst(code, o0); if (node == nullptr) @@ -540,7 +535,7 @@ HLInst* X86Compiler::emit(uint32_t code, uint64_t o0_) { return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) noexcept { Imm o1(o1_); HLInst* node = newInst(code, o0, o1); if (node == nullptr) @@ -548,7 +543,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, int o1_) { return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) noexcept { Imm o1(o1_); HLInst* node = newInst(code, o0, o1); if (node == nullptr) @@ -556,7 +551,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, uint64_t o1_) { return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, int o2_) noexcept { Imm o2(o2_); HLInst* node = newInst(code, o0, o1, o2); if (node == nullptr) 
@@ -564,7 +559,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, i return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2_) noexcept { Imm o2(o2_); HLInst* node = newInst(code, o0, o1, o2); if (node == nullptr) @@ -572,7 +567,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, u return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3_) noexcept { Imm o3(o3_); HLInst* node = newInst(code, o0, o1, o2, o3); if (node == nullptr) @@ -580,7 +575,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, c return static_cast(addNode(node)); } -HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3_) { +HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3_) noexcept { Imm o3(o3_); HLInst* node = newInst(code, o0, o1, o2, o3); if (node == nullptr) @@ -592,7 +587,7 @@ HLInst* X86Compiler::emit(uint32_t code, const Operand& o0, const Operand& o1, c // [asmjit::X86Compiler - Func] // ============================================================================ -X86FuncNode* X86Compiler::newFunc(const FuncPrototype& p) { +X86FuncNode* X86Compiler::newFunc(const FuncPrototype& p) noexcept { X86FuncNode* func = newNode(); Error error; @@ -639,7 +634,7 @@ _NoMemory: return nullptr; } -X86FuncNode* X86Compiler::addFunc(const FuncPrototype& p) { +X86FuncNode* X86Compiler::addFunc(const FuncPrototype& p) noexcept { X86FuncNode* func = newFunc(p); if (func == nullptr) { @@ -661,7 +656,7 @@ X86FuncNode* 
X86Compiler::addFunc(const FuncPrototype& p) { return func; } -HLSentinel* X86Compiler::endFunc() { +HLSentinel* X86Compiler::endFunc() noexcept { X86FuncNode* func = getFunc(); ASMJIT_ASSERT(func != nullptr); @@ -686,7 +681,7 @@ HLSentinel* X86Compiler::endFunc() { // [asmjit::X86Compiler - Ret] // ============================================================================ -HLRet* X86Compiler::newRet(const Operand& o0, const Operand& o1) { +HLRet* X86Compiler::newRet(const Operand& o0, const Operand& o1) noexcept { HLRet* node = newNode(o0, o1); if (node == nullptr) goto _NoMemory; @@ -697,7 +692,7 @@ _NoMemory: return nullptr; } -HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) { +HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) noexcept { HLRet* node = newRet(o0, o1); if (node == nullptr) return node; @@ -708,7 +703,7 @@ HLRet* X86Compiler::addRet(const Operand& o0, const Operand& o1) { // [asmjit::X86Compiler - Call] // ============================================================================ -X86CallNode* X86Compiler::newCall(const Operand& o0, const FuncPrototype& p) { +X86CallNode* X86Compiler::newCall(const Operand& o0, const FuncPrototype& p) noexcept { X86CallNode* node = newNode(o0); Error error; uint32_t nArgs; @@ -737,7 +732,7 @@ _NoMemory: return nullptr; } -X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) { +X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) noexcept { X86CallNode* node = newCall(o0, p); if (node == nullptr) return nullptr; @@ -748,7 +743,7 @@ X86CallNode* X86Compiler::addCall(const Operand& o0, const FuncPrototype& p) { // [asmjit::X86Compiler - Vars] // ============================================================================ -Error X86Compiler::setArg(uint32_t argIndex, const Var& var) { +Error X86Compiler::setArg(uint32_t argIndex, const Var& var) noexcept { X86FuncNode* func = getFunc(); if (func == nullptr) @@ -763,7 +758,7 @@ Error 
X86Compiler::setArg(uint32_t argIndex, const Var& var) { return kErrorOk; } -Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name, va_list ap) { +Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name) noexcept { ASMJIT_ASSERT(vType < kX86VarTypeCount); vType = _targetVarMapping[vType]; ASMJIT_ASSERT(vType != kInvalidVar); @@ -775,44 +770,47 @@ Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* name, va_list a return kErrorInvalidArgument; } - const X86VarInfo& vInfo = _x86VarInfo[vType]; - char buf[64]; + const VarInfo& vInfo = _x86VarInfo[vType]; + VarData* vd = _newVd(vInfo, name); - // Format the name if `ap` is given. - if (ap) { - vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), name, ap); - buf[ASMJIT_ARRAY_SIZE(buf) - 1] = '\0'; - name = buf; - } - - VarData* vd = _newVd(vType, vInfo.getSize(), vInfo.getClass(), name); if (vd == nullptr) { static_cast(var)->reset(); return getLastError(); } - var->_init_packed_op_sz_w0_id(kOperandTypeVar, vInfo.getSize(), vInfo.getReg() << 8, vd->getId()); + var->_init_packed_op_sz_w0_id(Operand::kTypeVar, vInfo.getSize(), vInfo.getRegType() << 8, vd->getId()); var->_vreg.vType = vType; return kErrorOk; } +Error X86Compiler::_newVar(Var* var, uint32_t vType, const char* fmt, va_list ap) noexcept { + char name[64]; + + vsnprintf(name, ASMJIT_ARRAY_SIZE(name), fmt, ap); + name[ASMJIT_ARRAY_SIZE(name) - 1] = '\0'; + return _newVar(var, vType, name); +} + // ============================================================================ // [asmjit::X86Compiler - Stack] // ============================================================================ -Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) { +Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept { if (size == 0) return kErrorInvalidArgument; if (alignment > 64) alignment = 64; - VarData* vd = _newVd(kInvalidVar, size, kInvalidReg, name); + 
VarInfo vi = { kInvalidVar, 0, kInvalidReg , kInvalidReg, 0, "" }; + VarData* vd = _newVd(vi, name); + if (vd == nullptr) { static_cast(mem)->reset(); return getLastError(); } + vd->_size = size; vd->_isStack = true; vd->_alignment = static_cast(alignment); @@ -824,7 +822,7 @@ Error X86Compiler::_newStack(BaseMem* mem, uint32_t size, uint32_t alignment, co // [asmjit::X86Compiler - Const] // ============================================================================ -Error X86Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) { +Error X86Compiler::_newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept { Error error = kErrorOk; size_t offset; diff --git a/src/asmjit/x86/x86compiler.h b/src/asmjit/x86/x86compiler.h index 49b0b2c..84b83ef 100644 --- a/src/asmjit/x86/x86compiler.h +++ b/src/asmjit/x86/x86compiler.h @@ -32,52 +32,13 @@ struct X86FuncNode; //! \addtogroup asmjit_x86 //! \{ -// ============================================================================ -// [asmjit::X86VarInfo] -// ============================================================================ - //! \internal -//! -//! X86 variable information. -struct X86VarInfo { - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get register type, see `X86RegType`. - ASMJIT_INLINE uint32_t getReg() const noexcept { return _reg; } - //! Get register size in bytes. - ASMJIT_INLINE uint32_t getSize() const noexcept { return _size; } - //! Get variable class, see `RegClass`. - ASMJIT_INLINE uint32_t getClass() const noexcept { return _class; } - //! Get variable description, see `VarFlag`. - ASMJIT_INLINE uint32_t getDesc() const noexcept { return _desc; } - //! Get variable type name. 
- ASMJIT_INLINE const char* getName() const noexcept { return _name; } - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Register type, see `X86RegType`. - uint8_t _reg; - //! Register size in bytes. - uint8_t _size; - //! Register class, see `RegClass`. - uint8_t _class; - //! Variable flags, see `VarFlag`. - uint8_t _desc; - //! Variable type name. - char _name[4]; -}; - -//! \internal -ASMJIT_VARAPI const X86VarInfo _x86VarInfo[]; +ASMJIT_VARAPI const VarInfo _x86VarInfo[]; #if defined(ASMJIT_BUILD_X86) //! \internal //! -//! Mapping of x86 variables into their real IDs. +//! Mapping of x86 variable types, including all abstract types, into their real types. //! //! This mapping translates the following: //! - `kVarTypeInt64` to `kInvalidVar`. @@ -90,7 +51,7 @@ ASMJIT_VARAPI const uint8_t _x86VarMapping[kX86VarTypeCount]; #if defined(ASMJIT_BUILD_X64) //! \internal //! -//! Mapping of x64 variables into their real IDs. +//! Mapping of x64 variable types, including all abstract types, into their real types. //! //! This mapping translates the following: //! - `kVarTypeIntPtr` to `kVarTypeInt64`. @@ -111,7 +72,7 @@ struct X86FuncNode : public HLFunc { // -------------------------------------------------------------------------- //! Create a new `X86FuncNode` instance. - ASMJIT_INLINE X86FuncNode(Compiler* compiler) : HLFunc(compiler) { + ASMJIT_INLINE X86FuncNode(Compiler* compiler) noexcept : HLFunc(compiler) { _decl = &_x86Decl; _saveRestoreRegs.reset(); @@ -129,48 +90,48 @@ struct X86FuncNode : public HLFunc { } //! Destroy the `X86FuncNode` instance. - ASMJIT_INLINE ~X86FuncNode() {} + ASMJIT_INLINE ~X86FuncNode() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! 
Get function declaration as `X86FuncDecl`. - ASMJIT_INLINE X86FuncDecl* getDecl() const { + ASMJIT_INLINE X86FuncDecl* getDecl() const noexcept { return const_cast(&_x86Decl); } //! Get argument. - ASMJIT_INLINE VarData* getArg(uint32_t i) const { + ASMJIT_INLINE VarData* getArg(uint32_t i) const noexcept { ASMJIT_ASSERT(i < _x86Decl.getNumArgs()); return static_cast(_args[i]); } //! Get registers which have to be saved in prolog/epilog. - ASMJIT_INLINE uint32_t getSaveRestoreRegs(uint32_t rc) { return _saveRestoreRegs.get(rc); } + ASMJIT_INLINE uint32_t getSaveRestoreRegs(uint32_t rc) noexcept { return _saveRestoreRegs.get(rc); } //! Get stack size needed to align stack back to the nature alignment. - ASMJIT_INLINE uint32_t getAlignStackSize() const { return _alignStackSize; } + ASMJIT_INLINE uint32_t getAlignStackSize() const noexcept { return _alignStackSize; } //! Set stack size needed to align stack back to the nature alignment. - ASMJIT_INLINE void setAlignStackSize(uint32_t s) { _alignStackSize = s; } + ASMJIT_INLINE void setAlignStackSize(uint32_t s) noexcept { _alignStackSize = s; } //! Get aligned stack size used by variables and memory allocated on the stack. - ASMJIT_INLINE uint32_t getAlignedMemStackSize() const { return _alignedMemStackSize; } + ASMJIT_INLINE uint32_t getAlignedMemStackSize() const noexcept { return _alignedMemStackSize; } //! Get stack size used by push/pop sequences in prolog/epilog. - ASMJIT_INLINE uint32_t getPushPopStackSize() const { return _pushPopStackSize; } + ASMJIT_INLINE uint32_t getPushPopStackSize() const noexcept { return _pushPopStackSize; } //! Set stack size used by push/pop sequences in prolog/epilog. - ASMJIT_INLINE void setPushPopStackSize(uint32_t s) { _pushPopStackSize = s; } + ASMJIT_INLINE void setPushPopStackSize(uint32_t s) noexcept { _pushPopStackSize = s; } //! Get stack size used by mov sequences in prolog/epilog. 
- ASMJIT_INLINE uint32_t getMoveStackSize() const { return _moveStackSize; } + ASMJIT_INLINE uint32_t getMoveStackSize() const noexcept { return _moveStackSize; } //! Set stack size used by mov sequences in prolog/epilog. - ASMJIT_INLINE void setMoveStackSize(uint32_t s) { _moveStackSize = s; } + ASMJIT_INLINE void setMoveStackSize(uint32_t s) noexcept { _moveStackSize = s; } //! Get extra stack size. - ASMJIT_INLINE uint32_t getExtraStackSize() const { return _extraStackSize; } + ASMJIT_INLINE uint32_t getExtraStackSize() const noexcept { return _extraStackSize; } //! Set extra stack size. - ASMJIT_INLINE void setExtraStackSize(uint32_t s) { _extraStackSize = s; } + ASMJIT_INLINE void setExtraStackSize(uint32_t s) noexcept { _extraStackSize = s; } //! Get whether the function has stack frame register. //! @@ -178,15 +139,23 @@ struct X86FuncNode : public HLFunc { //! generating standard prolog/epilog sequence. //! //! \note Used only when stack is misaligned. - ASMJIT_INLINE bool hasStackFrameReg() const { return _stackFrameRegIndex != kInvalidReg; } + ASMJIT_INLINE bool hasStackFrameReg() const noexcept { + return _stackFrameRegIndex != kInvalidReg; + } + //! Get stack frame register index. //! //! \note Used only when stack is misaligned. - ASMJIT_INLINE uint32_t getStackFrameRegIndex() const { return _stackFrameRegIndex; } + ASMJIT_INLINE uint32_t getStackFrameRegIndex() const noexcept { + return _stackFrameRegIndex; + } + //! Get whether the stack frame register is preserved. //! //! \note Used only when stack is misaligned. 
- ASMJIT_INLINE bool isStackFrameRegPreserved() const { return static_cast(_isStackFrameRegPreserved); } + ASMJIT_INLINE bool isStackFrameRegPreserved() const noexcept { + return static_cast(_isStackFrameRegPreserved); + } // -------------------------------------------------------------------------- // [Members] @@ -233,20 +202,20 @@ struct X86CallNode : public HLCall { // -------------------------------------------------------------------------- //! Create a new `X86CallNode` instance. - ASMJIT_INLINE X86CallNode(Compiler* compiler, const Operand& target) : HLCall(compiler, target) { + ASMJIT_INLINE X86CallNode(Compiler* compiler, const Operand& target) noexcept : HLCall(compiler, target) { _decl = &_x86Decl; _usedArgs.reset(); } //! Destroy the `X86CallNode` instance. - ASMJIT_INLINE ~X86CallNode() {} + ASMJIT_INLINE ~X86CallNode() noexcept {} // -------------------------------------------------------------------------- // [Accessors] // -------------------------------------------------------------------------- //! Get the function prototype. - ASMJIT_INLINE X86FuncDecl* getDecl() const { + ASMJIT_INLINE X86FuncDecl* getDecl() const noexcept { return const_cast(&_x86Decl); } @@ -255,7 +224,7 @@ struct X86CallNode : public HLCall { // -------------------------------------------------------------------------- //! Set function prototype. - ASMJIT_INLINE Error setPrototype(const FuncPrototype& p) { + ASMJIT_INLINE Error setPrototype(const FuncPrototype& p) noexcept { return _x86Decl.setPrototype(p); } @@ -264,21 +233,21 @@ struct X86CallNode : public HLCall { // -------------------------------------------------------------------------- //! Set argument at `i` to `op`. - ASMJIT_API bool _setArg(uint32_t i, const Operand& op); + ASMJIT_API bool _setArg(uint32_t i, const Operand& op) noexcept; //! Set return at `i` to `op`. - ASMJIT_API bool _setRet(uint32_t i, const Operand& op); + ASMJIT_API bool _setRet(uint32_t i, const Operand& op) noexcept; //! 
Set argument at `i` to `var`. - ASMJIT_INLINE bool setArg(uint32_t i, const Var& var) { return _setArg(i, var); } + ASMJIT_INLINE bool setArg(uint32_t i, const Var& var) noexcept { return _setArg(i, var); } //! Set argument at `i` to `reg` (FP registers only). - ASMJIT_INLINE bool setArg(uint32_t i, const X86FpReg& reg) { return _setArg(i, reg); } + ASMJIT_INLINE bool setArg(uint32_t i, const X86FpReg& reg) noexcept { return _setArg(i, reg); } //! Set argument at `i` to `imm`. - ASMJIT_INLINE bool setArg(uint32_t i, const Imm& imm) { return _setArg(i, imm); } + ASMJIT_INLINE bool setArg(uint32_t i, const Imm& imm) noexcept { return _setArg(i, imm); } //! Set return at `i` to `var`. - ASMJIT_INLINE bool setRet(uint32_t i, const Var& var) { return _setRet(i, var); } + ASMJIT_INLINE bool setRet(uint32_t i, const Var& var) noexcept { return _setRet(i, var); } //! Set return at `i` to `reg` (FP registers only). - ASMJIT_INLINE bool setRet(uint32_t i, const X86FpReg& reg) { return _setRet(i, reg); } + ASMJIT_INLINE bool setRet(uint32_t i, const X86FpReg& reg) noexcept { return _setRet(i, reg); } // -------------------------------------------------------------------------- // [Members] @@ -534,9 +503,9 @@ struct X86CallNode : public HLCall { //! //! c.addFunc(FuncBuilder0(kCallConvHost)); //! +//! Label L0 = c.newLabel(); //! X86GpVar x = c.newInt32("x"); //! X86GpVar y = c.newInt32("y"); -//! Label L0(c); //! //! // After these two lines, `x` and `y` will be always stored in registers: //! // x - register. @@ -618,9 +587,9 @@ struct X86CallNode : public HLCall { //! //! c.addFunc(FuncBuilder0(kCallConvHost)); //! +//! Label L0 = c.newLabel(); //! X86GpVar x = c.newInt32("x"); //! X86GpVar y = c.newInt32("y"); -//! Label L0(c); //! //! // After these two lines, `x` and `y` will be always stored in registers. //! // `x` - register. 
@@ -750,30 +719,30 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { // -------------------------------------------------------------------------- //! Create a `X86Compiler` instance. - ASMJIT_API X86Compiler(X86Assembler* assembler = nullptr); + ASMJIT_API X86Compiler(X86Assembler* assembler = nullptr) noexcept; //! Destroy the `X86Compiler` instance. - ASMJIT_API ~X86Compiler(); + ASMJIT_API ~X86Compiler() noexcept; // -------------------------------------------------------------------------- // [Attach / Reset] // -------------------------------------------------------------------------- //! \override - ASMJIT_API virtual Error attach(Assembler* assembler); + ASMJIT_API virtual Error attach(Assembler* assembler) noexcept; //! \override - ASMJIT_API virtual void reset(bool releaseMemory); + ASMJIT_API virtual void reset(bool releaseMemory) noexcept; // ------------------------------------------------------------------------- // [Finalize] // ------------------------------------------------------------------------- - ASMJIT_API virtual Error finalize(); + ASMJIT_API virtual Error finalize() noexcept; // -------------------------------------------------------------------------- // [Assembler] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86Assembler* getAssembler() const { + ASMJIT_INLINE X86Assembler* getAssembler() const noexcept { return static_cast(_assembler); } @@ -782,54 +751,54 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { // -------------------------------------------------------------------------- //! Get count of registers of the current architecture and mode. - ASMJIT_INLINE const X86RegCount& getRegCount() const { return _regCount; } + ASMJIT_INLINE const X86RegCount& getRegCount() const noexcept { return _regCount; } //! Get Gpd or Gpq register depending on the current architecture. 
- ASMJIT_INLINE X86GpReg gpz(uint32_t index) const { return X86GpReg(zax, index); } + ASMJIT_INLINE X86GpReg gpz(uint32_t index) const noexcept { return X86GpReg(zax, index); } //! Create an architecture dependent intptr_t memory operand. - ASMJIT_INLINE X86Mem intptr_ptr(const X86GpReg& base, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem intptr_ptr(const X86GpReg& base, int32_t disp = 0) const noexcept { return x86::ptr(base, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr(const X86GpReg& base, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem intptr_ptr(const X86GpReg& base, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return x86::ptr(base, index, shift, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, int32_t disp = 0) const noexcept { return x86::ptr(label, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp = 0) const noexcept { return x86::ptr(label, index, shift, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr(const X86RipReg& rip, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem intptr_ptr(const X86RipReg& rip, int32_t disp = 0) const noexcept { return x86::ptr(rip, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, int32_t disp = 0) const noexcept { return x86::ptr_abs(pAbs, disp, zax.getSize()); } //! 
\overload - ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, const X86GpReg& index, uint32_t shift, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, const X86GpReg& index, uint32_t shift, int32_t disp = 0) const noexcept { return x86::ptr_abs(pAbs, index, shift, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr(const X86GpVar& base, int32_t disp = 0) { + ASMJIT_INLINE X86Mem intptr_ptr(const X86GpVar& base, int32_t disp = 0) noexcept { return x86::ptr(base, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr(const X86GpVar& base, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) { + ASMJIT_INLINE X86Mem intptr_ptr(const X86GpVar& base, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) noexcept { return x86::ptr(base, index, shift, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp = 0) { + ASMJIT_INLINE X86Mem intptr_ptr(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp = 0) noexcept { return x86::ptr(label, index, shift, disp, zax.getSize()); } //! \overload - ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, const X86GpVar& index, uint32_t shift, int32_t disp = 0) { + ASMJIT_INLINE X86Mem intptr_ptr_abs(Ptr pAbs, const X86GpVar& index, uint32_t shift, int32_t disp = 0) noexcept { return x86::ptr_abs(pAbs, index, shift, disp, zax.getSize()); } @@ -838,54 +807,54 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { // -------------------------------------------------------------------------- //! Create a new `HLInst`. - ASMJIT_API HLInst* newInst(uint32_t code); + ASMJIT_API HLInst* newInst(uint32_t code) noexcept; //! \overload - ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0) noexcept; //! 
\overload - ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1) noexcept; //! \overload - ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept; //! \overload - ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept; //! \overload - ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4); + ASMJIT_API HLInst* newInst(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept; //! Add a new `HLInst`. - ASMJIT_API HLInst* emit(uint32_t code); + ASMJIT_API HLInst* emit(uint32_t code) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3) noexcept; //! 
\overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, const Operand& o3, const Operand& o4) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, int o0); + ASMJIT_API HLInst* emit(uint32_t code, int o0) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, uint64_t o0); + ASMJIT_API HLInst* emit(uint32_t code, uint64_t o0) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, int o1); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, int o1) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, uint64_t o1); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, uint64_t o1) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, int o2); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, int o2) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, uint64_t o2) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, int o3) noexcept; //! \overload - ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3); + ASMJIT_API HLInst* emit(uint32_t code, const Operand& o0, const Operand& o1, const Operand& o2, uint64_t o3) noexcept; // -------------------------------------------------------------------------- // [Func] // -------------------------------------------------------------------------- //! Create a new `X86FuncNode`. 
- ASMJIT_API X86FuncNode* newFunc(const FuncPrototype& p); + ASMJIT_API X86FuncNode* newFunc(const FuncPrototype& p) noexcept; //! Add a new function. //! @@ -946,18 +915,18 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { //! \note To get the current function use `getFunc()` method. //! //! \sa \ref FuncBuilder0, \ref FuncBuilder1, \ref FuncBuilder2. - ASMJIT_API X86FuncNode* addFunc(const FuncPrototype& p); + ASMJIT_API X86FuncNode* addFunc(const FuncPrototype& p) noexcept; //! Emit a sentinel that marks the end of the current function. - ASMJIT_API HLSentinel* endFunc(); + ASMJIT_API HLSentinel* endFunc() noexcept; //! Get the current function node casted to `X86FuncNode`. //! //! This method can be called within `addFunc()` and `endFunc()` block to get //! current function you are working with. It's recommended to store `HLFunc` //! pointer returned by `addFunc<>` method, because this allows you in future - //! implement function sections outside of function itself. - ASMJIT_INLINE X86FuncNode* getFunc() const { + //! implement function sections outside of the function itself. + ASMJIT_INLINE X86FuncNode* getFunc() const noexcept { return static_cast(_func); } @@ -966,31 +935,32 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { // -------------------------------------------------------------------------- //! Create a new `HLRet`. - ASMJIT_API HLRet* newRet(const Operand& o0, const Operand& o1); + ASMJIT_API HLRet* newRet(const Operand& o0, const Operand& o1) noexcept; //! Add a new `HLRet`. - ASMJIT_API HLRet* addRet(const Operand& o0, const Operand& o1); + ASMJIT_API HLRet* addRet(const Operand& o0, const Operand& o1) noexcept; // -------------------------------------------------------------------------- // [Call] // -------------------------------------------------------------------------- //! Create a new `X86CallNode`. 
- ASMJIT_API X86CallNode* newCall(const Operand& o0, const FuncPrototype& p); + ASMJIT_API X86CallNode* newCall(const Operand& o0, const FuncPrototype& p) noexcept; //! Add a new `X86CallNode`. - ASMJIT_API X86CallNode* addCall(const Operand& o0, const FuncPrototype& p); + ASMJIT_API X86CallNode* addCall(const Operand& o0, const FuncPrototype& p) noexcept; // -------------------------------------------------------------------------- // [Args] // -------------------------------------------------------------------------- //! Set function argument to `var`. - ASMJIT_API Error setArg(uint32_t argIndex, const Var& var); + ASMJIT_API Error setArg(uint32_t argIndex, const Var& var) noexcept; // -------------------------------------------------------------------------- // [Vars] // -------------------------------------------------------------------------- - ASMJIT_API virtual Error _newVar(Var* var, uint32_t vType, const char* name, va_list ap); + ASMJIT_API Error _newVar(Var* var, uint32_t vType, const char* name) noexcept; + ASMJIT_API Error _newVar(Var* var, uint32_t vType, const char* fmt, va_list ap) noexcept; #if !defined(ASMJIT_DISABLE_LOGGER) #define ASMJIT_NEW_VAR_TYPE_EX(func, type, typeFirst, typeLast) \ @@ -1025,13 +995,13 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { ASMJIT_ASSERT(Utils::inInterval(vType, typeFirst, typeLast)); \ \ type var(NoInit); \ - _newVar(&var, vType, nullptr, nullptr); \ + _newVar(&var, vType, nullptr); \ return var; \ } #define ASMJIT_NEW_VAR_AUTO_EX(func, type, typeId) \ ASMJIT_NOINLINE type new##func(const char* name, ...) 
{ \ type var(NoInit); \ - _newVar(&var, typeId, nullptr, nullptr); \ + _newVar(&var, typeId, nullptr); \ return var; \ } #endif @@ -1043,8 +1013,8 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { \ type var(NoInit); \ \ - const X86VarInfo& vInfo = _x86VarInfo[vType]; \ - var._init_packed_op_sz_w0_id(kOperandTypeVar, vInfo.getSize(), vInfo.getReg() << 8, id); \ + const VarInfo& vInfo = _x86VarInfo[vType]; \ + var._init_packed_op_sz_w0_id(Operand::kTypeVar, vInfo.getSize(), vInfo.getRegType() << 8, id); \ var._vreg.vType = vType; \ \ return var; \ @@ -1055,7 +1025,7 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { ASMJIT_ASSERT(Utils::inInterval(vType, typeFirst, typeLast)); \ \ type var(NoInit); \ - _newVar(&var, vType, nullptr, nullptr); \ + _newVar(&var, vType, nullptr); \ return var; \ } \ \ @@ -1065,8 +1035,8 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { ASMJIT_INLINE type get##func##ById(uint32_t id) { \ type var(NoInit); \ \ - const X86VarInfo& vInfo = _x86VarInfo[typeId]; \ - var._init_packed_op_sz_w0_id(kOperandTypeVar, vInfo.getSize(), vInfo.getReg() << 8, id); \ + const VarInfo& vInfo = _x86VarInfo[typeId]; \ + var._init_packed_op_sz_w0_id(Operand::kTypeVar, vInfo.getSize(), vInfo.getRegType() << 8, id); \ var._vreg.vType = typeId; \ \ return var; \ @@ -1074,7 +1044,7 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { \ ASMJIT_INLINE type new##func() { \ type var(NoInit); \ - _newVar(&var, typeId, nullptr, nullptr); \ + _newVar(&var, typeId, nullptr); \ return var; \ } \ \ @@ -1112,10 +1082,10 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { // [Stack] // -------------------------------------------------------------------------- - ASMJIT_API virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name); + ASMJIT_API virtual Error _newStack(BaseMem* mem, uint32_t size, uint32_t alignment, const char* name) noexcept; //! Create a new memory chunk allocated on the current function's stack. 
- ASMJIT_INLINE X86Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) { + ASMJIT_INLINE X86Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) noexcept { X86Mem m(NoInit); _newStack(&m, size, alignment, name); return m; @@ -1125,97 +1095,97 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { // [Const] // -------------------------------------------------------------------------- - ASMJIT_API virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size); + ASMJIT_API virtual Error _newConst(BaseMem* mem, uint32_t scope, const void* data, size_t size) noexcept; //! Put data to a constant-pool and get a memory reference to it. - ASMJIT_INLINE X86Mem newConst(uint32_t scope, const void* data, size_t size) { + ASMJIT_INLINE X86Mem newConst(uint32_t scope, const void* data, size_t size) noexcept { X86Mem m(NoInit); _newConst(&m, scope, data, size); return m; } //! Put a BYTE `val` to a constant-pool. - ASMJIT_INLINE X86Mem newByteConst(uint32_t scope, uint8_t val) { return newConst(scope, &val, 1); } + ASMJIT_INLINE X86Mem newByteConst(uint32_t scope, uint8_t val) noexcept { return newConst(scope, &val, 1); } //! Put a WORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newWordConst(uint32_t scope, uint16_t val) { return newConst(scope, &val, 2); } + ASMJIT_INLINE X86Mem newWordConst(uint32_t scope, uint16_t val) noexcept { return newConst(scope, &val, 2); } //! Put a DWORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newDWordConst(uint32_t scope, uint32_t val) { return newConst(scope, &val, 4); } + ASMJIT_INLINE X86Mem newDWordConst(uint32_t scope, uint32_t val) noexcept { return newConst(scope, &val, 4); } //! Put a QWORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newQWordConst(uint32_t scope, uint64_t val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE X86Mem newQWordConst(uint32_t scope, uint64_t val) noexcept { return newConst(scope, &val, 8); } //! 
Put a WORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newInt16Const(uint32_t scope, int16_t val) { return newConst(scope, &val, 2); } + ASMJIT_INLINE X86Mem newInt16Const(uint32_t scope, int16_t val) noexcept { return newConst(scope, &val, 2); } //! Put a WORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newUInt16Const(uint32_t scope, uint16_t val) { return newConst(scope, &val, 2); } + ASMJIT_INLINE X86Mem newUInt16Const(uint32_t scope, uint16_t val) noexcept { return newConst(scope, &val, 2); } //! Put a DWORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newInt32Const(uint32_t scope, int32_t val) { return newConst(scope, &val, 4); } + ASMJIT_INLINE X86Mem newInt32Const(uint32_t scope, int32_t val) noexcept { return newConst(scope, &val, 4); } //! Put a DWORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newUInt32Const(uint32_t scope, uint32_t val) { return newConst(scope, &val, 4); } + ASMJIT_INLINE X86Mem newUInt32Const(uint32_t scope, uint32_t val) noexcept { return newConst(scope, &val, 4); } //! Put a QWORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newInt64Const(uint32_t scope, int64_t val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE X86Mem newInt64Const(uint32_t scope, int64_t val) noexcept { return newConst(scope, &val, 8); } //! Put a QWORD `val` to a constant-pool. - ASMJIT_INLINE X86Mem newUInt64Const(uint32_t scope, uint64_t val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE X86Mem newUInt64Const(uint32_t scope, uint64_t val) noexcept { return newConst(scope, &val, 8); } //! Put a SP-FP `val` to a constant-pool. - ASMJIT_INLINE X86Mem newFloatConst(uint32_t scope, float val) { return newConst(scope, &val, 4); } + ASMJIT_INLINE X86Mem newFloatConst(uint32_t scope, float val) noexcept { return newConst(scope, &val, 4); } //! Put a DP-FP `val` to a constant-pool. 
- ASMJIT_INLINE X86Mem newDoubleConst(uint32_t scope, double val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE X86Mem newDoubleConst(uint32_t scope, double val) noexcept { return newConst(scope, &val, 8); } //! Put a MMX `val` to a constant-pool. - ASMJIT_INLINE X86Mem newMmConst(uint32_t scope, const Vec64& val) { return newConst(scope, &val, 8); } + ASMJIT_INLINE X86Mem newMmConst(uint32_t scope, const Vec64& val) noexcept { return newConst(scope, &val, 8); } //! Put a XMM `val` to a constant-pool. - ASMJIT_INLINE X86Mem newXmmConst(uint32_t scope, const Vec128& val) { return newConst(scope, &val, 16); } + ASMJIT_INLINE X86Mem newXmmConst(uint32_t scope, const Vec128& val) noexcept { return newConst(scope, &val, 16); } //! Put a YMM `val` to a constant-pool. - ASMJIT_INLINE X86Mem newYmmConst(uint32_t scope, const Vec256& val) { return newConst(scope, &val, 32); } + ASMJIT_INLINE X86Mem newYmmConst(uint32_t scope, const Vec256& val) noexcept { return newConst(scope, &val, 32); } // -------------------------------------------------------------------------- // [Embed] // -------------------------------------------------------------------------- //! Add 8-bit integer data to the instruction stream. - ASMJIT_INLINE Error db(uint8_t x) { return embed(&x, 1); } + ASMJIT_INLINE Error db(uint8_t x) noexcept { return embed(&x, 1); } //! Add 16-bit integer data to the instruction stream. - ASMJIT_INLINE Error dw(uint16_t x) { return embed(&x, 2); } + ASMJIT_INLINE Error dw(uint16_t x) noexcept { return embed(&x, 2); } //! Add 32-bit integer data to the instruction stream. - ASMJIT_INLINE Error dd(uint32_t x) { return embed(&x, 4); } + ASMJIT_INLINE Error dd(uint32_t x) noexcept { return embed(&x, 4); } //! Add 64-bit integer data to the instruction stream. - ASMJIT_INLINE Error dq(uint64_t x) { return embed(&x, 8); } + ASMJIT_INLINE Error dq(uint64_t x) noexcept { return embed(&x, 8); } //! Add 8-bit integer data to the instruction stream. 
- ASMJIT_INLINE Error dint8(int8_t x) { return embed(&x, static_cast(sizeof(int8_t))); } + ASMJIT_INLINE Error dint8(int8_t x) noexcept { return embed(&x, static_cast(sizeof(int8_t))); } //! Add 8-bit integer data to the instruction stream. - ASMJIT_INLINE Error duint8(uint8_t x) { return embed(&x, static_cast(sizeof(uint8_t))); } + ASMJIT_INLINE Error duint8(uint8_t x) noexcept { return embed(&x, static_cast(sizeof(uint8_t))); } //! Add 16-bit integer data to the instruction stream. - ASMJIT_INLINE Error dint16(int16_t x) { return embed(&x, static_cast(sizeof(int16_t))); } + ASMJIT_INLINE Error dint16(int16_t x) noexcept { return embed(&x, static_cast(sizeof(int16_t))); } //! Add 16-bit integer data to the instruction stream. - ASMJIT_INLINE Error duint16(uint16_t x) { return embed(&x, static_cast(sizeof(uint16_t))); } + ASMJIT_INLINE Error duint16(uint16_t x) noexcept { return embed(&x, static_cast(sizeof(uint16_t))); } //! Add 32-bit integer data to the instruction stream. - ASMJIT_INLINE Error dint32(int32_t x) { return embed(&x, static_cast(sizeof(int32_t))); } + ASMJIT_INLINE Error dint32(int32_t x) noexcept { return embed(&x, static_cast(sizeof(int32_t))); } //! Add 32-bit integer data to the instruction stream. - ASMJIT_INLINE Error duint32(uint32_t x) { return embed(&x, static_cast(sizeof(uint32_t))); } + ASMJIT_INLINE Error duint32(uint32_t x) noexcept { return embed(&x, static_cast(sizeof(uint32_t))); } //! Add 64-bit integer data to the instruction stream. - ASMJIT_INLINE Error dint64(int64_t x) { return embed(&x, static_cast(sizeof(int64_t))); } + ASMJIT_INLINE Error dint64(int64_t x) noexcept { return embed(&x, static_cast(sizeof(int64_t))); } //! Add 64-bit integer data to the instruction stream. - ASMJIT_INLINE Error duint64(uint64_t x) { return embed(&x, static_cast(sizeof(uint64_t))); } + ASMJIT_INLINE Error duint64(uint64_t x) noexcept { return embed(&x, static_cast(sizeof(uint64_t))); } //! Add float data to the instruction stream. 
- ASMJIT_INLINE Error dfloat(float x) { return embed(&x, static_cast(sizeof(float))); } + ASMJIT_INLINE Error dfloat(float x) noexcept { return embed(&x, static_cast(sizeof(float))); } //! Add double data to the instruction stream. - ASMJIT_INLINE Error ddouble(double x) { return embed(&x, static_cast(sizeof(double))); } + ASMJIT_INLINE Error ddouble(double x) noexcept { return embed(&x, static_cast(sizeof(double))); } //! Add Mm data to the instruction stream. - ASMJIT_INLINE Error dmm(const Vec64& x) { return embed(&x, static_cast(sizeof(Vec64))); } + ASMJIT_INLINE Error dmm(const Vec64& x) noexcept { return embed(&x, static_cast(sizeof(Vec64))); } //! Add Xmm data to the instruction stream. - ASMJIT_INLINE Error dxmm(const Vec128& x) { return embed(&x, static_cast(sizeof(Vec128))); } + ASMJIT_INLINE Error dxmm(const Vec128& x) noexcept { return embed(&x, static_cast(sizeof(Vec128))); } //! Add Ymm data to the instruction stream. - ASMJIT_INLINE Error dymm(const Vec256& x) { return embed(&x, static_cast(sizeof(Vec256))); } + ASMJIT_INLINE Error dymm(const Vec256& x) noexcept { return embed(&x, static_cast(sizeof(Vec256))); } //! Add data in a given structure instance to the instruction stream. template - ASMJIT_INLINE Error dstruct(const T& x) { return embed(&x, static_cast(sizeof(T))); } + ASMJIT_INLINE Error dstruct(const T& x) noexcept { return embed(&x, static_cast(sizeof(T))); } // ------------------------------------------------------------------------- // [Instruction Options] @@ -1224,13 +1194,13 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { ASMJIT_X86_EMIT_OPTIONS(X86Compiler) //! Force the compiler to not follow the conditional or unconditional jump. - ASMJIT_INLINE X86Compiler& unfollow() { + ASMJIT_INLINE X86Compiler& unfollow() noexcept { _instOptions |= kInstOptionUnfollow; return *this; } //! Tell the compiler that the destination variable will be overwritten. 
- ASMJIT_INLINE X86Compiler& overwrite() { + ASMJIT_INLINE X86Compiler& overwrite() noexcept { _instOptions |= kInstOptionOverwrite; return *this; } @@ -1264,232 +1234,232 @@ struct ASMJIT_VIRTAPI X86Compiler : public Compiler { // -------------------------------------------------------------------------- #define INST_0x(_Inst_, _Code_) \ - ASMJIT_INLINE HLInst* _Inst_() { \ + ASMJIT_INLINE HLInst* _Inst_() noexcept { \ return emit(_Code_); \ } #define INST_1x(_Inst_, _Code_, _Op0_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0) noexcept { \ return emit(_Code_, o0); \ } #define INST_1i(_Inst_, _Code_, _Op0_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0) noexcept { \ return emit(_Code_, o0); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(int o0) { \ + ASMJIT_INLINE HLInst* _Inst_(int o0) noexcept { \ return emit(_Code_, o0); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(unsigned int o0) { \ + ASMJIT_INLINE HLInst* _Inst_(unsigned int o0) noexcept { \ return emit(_Code_, static_cast(o0)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(int64_t o0) { \ + ASMJIT_INLINE HLInst* _Inst_(int64_t o0) noexcept { \ return emit(_Code_, static_cast(o0)); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE HLInst* _Inst_(uint64_t o0) { \ + ASMJIT_INLINE HLInst* _Inst_(uint64_t o0) noexcept { \ return emit(_Code_, o0); \ } #define INST_1cc(_Inst_, _Code_, _Translate_, _Op0_) \ - ASMJIT_INLINE HLInst* _Inst_(uint32_t cc, const _Op0_& o0) { \ + ASMJIT_INLINE HLInst* _Inst_(uint32_t cc, const _Op0_& o0) noexcept { \ return emit(_Translate_(cc), o0); \ } \ \ - ASMJIT_INLINE HLInst* _Inst_##a(const _Op0_& o0) { return emit(_Code_##a, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##ae(const _Op0_& o0) { return emit(_Code_##ae, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##b(const _Op0_& o0) { return emit(_Code_##b, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##be(const _Op0_& o0) { return emit(_Code_##be, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##c(const _Op0_& o0) { return emit(_Code_##c, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##e(const _Op0_& o0) { return emit(_Code_##e, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##g(const _Op0_& o0) { return emit(_Code_##g, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##ge(const _Op0_& o0) { return emit(_Code_##ge, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##l(const _Op0_& o0) { return emit(_Code_##l, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##le(const _Op0_& o0) { return emit(_Code_##le, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##na(const _Op0_& o0) { return emit(_Code_##na, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nae(const _Op0_& o0) { return emit(_Code_##nae, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nb(const _Op0_& o0) { return emit(_Code_##nb, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nbe(const _Op0_& o0) { return emit(_Code_##nbe, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nc(const _Op0_& o0) { return emit(_Code_##nc, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##ne(const _Op0_& o0) { return emit(_Code_##ne, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##ng(const _Op0_& o0) { return emit(_Code_##ng, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nge(const _Op0_& o0) { return emit(_Code_##nge, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nl(const _Op0_& o0) { 
return emit(_Code_##nl, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nle(const _Op0_& o0) { return emit(_Code_##nle, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##no(const _Op0_& o0) { return emit(_Code_##no, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##np(const _Op0_& o0) { return emit(_Code_##np, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##ns(const _Op0_& o0) { return emit(_Code_##ns, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##nz(const _Op0_& o0) { return emit(_Code_##nz, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##o(const _Op0_& o0) { return emit(_Code_##o, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##p(const _Op0_& o0) { return emit(_Code_##p, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##pe(const _Op0_& o0) { return emit(_Code_##pe, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##po(const _Op0_& o0) { return emit(_Code_##po, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##s(const _Op0_& o0) { return emit(_Code_##s, o0); } \ - ASMJIT_INLINE HLInst* _Inst_##z(const _Op0_& o0) { return emit(_Code_##z, o0); } + ASMJIT_INLINE HLInst* _Inst_##a(const _Op0_& o0) noexcept { return emit(_Code_##a, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ae(const _Op0_& o0) noexcept { return emit(_Code_##ae, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##b(const _Op0_& o0) noexcept { return emit(_Code_##b, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##be(const _Op0_& o0) noexcept { return emit(_Code_##be, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##c(const _Op0_& o0) noexcept { return emit(_Code_##c, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##e(const _Op0_& o0) noexcept { return emit(_Code_##e, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##g(const _Op0_& o0) noexcept { return emit(_Code_##g, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ge(const _Op0_& o0) noexcept { return emit(_Code_##ge, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##l(const _Op0_& o0) noexcept { return emit(_Code_##l, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##le(const _Op0_& o0) noexcept { return emit(_Code_##le, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##na(const _Op0_& o0) noexcept { return 
emit(_Code_##na, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nae(const _Op0_& o0) noexcept { return emit(_Code_##nae, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nb(const _Op0_& o0) noexcept { return emit(_Code_##nb, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nbe(const _Op0_& o0) noexcept { return emit(_Code_##nbe, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nc(const _Op0_& o0) noexcept { return emit(_Code_##nc, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ne(const _Op0_& o0) noexcept { return emit(_Code_##ne, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ng(const _Op0_& o0) noexcept { return emit(_Code_##ng, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nge(const _Op0_& o0) noexcept { return emit(_Code_##nge, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nl(const _Op0_& o0) noexcept { return emit(_Code_##nl, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nle(const _Op0_& o0) noexcept { return emit(_Code_##nle, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##no(const _Op0_& o0) noexcept { return emit(_Code_##no, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##np(const _Op0_& o0) noexcept { return emit(_Code_##np, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##ns(const _Op0_& o0) noexcept { return emit(_Code_##ns, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##nz(const _Op0_& o0) noexcept { return emit(_Code_##nz, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##o(const _Op0_& o0) noexcept { return emit(_Code_##o, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##p(const _Op0_& o0) noexcept { return emit(_Code_##p, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##pe(const _Op0_& o0) noexcept { return emit(_Code_##pe, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##po(const _Op0_& o0) noexcept { return emit(_Code_##po, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##s(const _Op0_& o0) noexcept { return emit(_Code_##s, o0); } \ + ASMJIT_INLINE HLInst* _Inst_##z(const _Op0_& o0) noexcept { return emit(_Code_##z, o0); } #define INST_2x(_Inst_, _Code_, _Op0_, _Op1_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ + ASMJIT_INLINE HLInst* 
_Inst_(const _Op0_& o0, const _Op1_& o1) noexcept { \ return emit(_Code_, o0, o1); \ } #define INST_2i(_Inst_, _Code_, _Op0_, _Op1_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1) noexcept { \ return emit(_Code_, o0, o1); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int o1) noexcept { \ return emit(_Code_, o0, o1); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, unsigned int o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, unsigned int o1) noexcept { \ return emit(_Code_, o0, static_cast(o1)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int64_t o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int64_t o1) noexcept { \ return emit(_Code_, o0, static_cast(o1)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, uint64_t o1) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, uint64_t o1) noexcept { \ return emit(_Code_, o0, o1); \ } #define INST_2cc(_Inst_, _Code_, _Translate_, _Op0_, _Op1_) \ - ASMJIT_INLINE HLInst* _Inst_(uint32_t cc, const _Op0_& o0, const _Op1_& o1) { \ + ASMJIT_INLINE HLInst* _Inst_(uint32_t cc, const _Op0_& o0, const _Op1_& o1) noexcept { \ return emit(_Translate_(cc), o0, o1); \ } \ \ - ASMJIT_INLINE HLInst* _Inst_##a(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##a, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##ae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ae, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##b(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##b, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##be(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##be, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##c(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##c, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##e(const _Op0_& o0, const _Op1_& o1) { 
return emit(_Code_##e, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##g(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##g, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##ge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ge, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##l(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##l, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##le(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##le, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##na(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##na, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nae(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nae, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nb(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nb, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nbe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nbe, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nc(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nc, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##ne(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ne, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##ng(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ng, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nge(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nge, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nl(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nl, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nle(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nle, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##no(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##no, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##np(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##np, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##ns(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##ns, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##nz(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##nz, o0, o1); } \ - ASMJIT_INLINE 
HLInst* _Inst_##o(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##o, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##p(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##p, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##pe(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##pe, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##po(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##po, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##s(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##s, o0, o1); } \ - ASMJIT_INLINE HLInst* _Inst_##z(const _Op0_& o0, const _Op1_& o1) { return emit(_Code_##z, o0, o1); } + ASMJIT_INLINE HLInst* _Inst_##a(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##a, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ae(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##ae, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##b(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##b, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##be(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##be, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##c(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##c, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##e(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##e, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##g(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##g, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ge(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##ge, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##l(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##l, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##le(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##le, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##na(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##na, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nae(const _Op0_& o0, const _Op1_& o1) noexcept { return 
emit(_Code_##nae, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nb(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##nb, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nbe(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##nbe, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nc(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##nc, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ne(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##ne, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ng(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##ng, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nge(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##nge, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nl(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##nl, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nle(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##nle, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##no(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##no, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##np(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##np, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##ns(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##ns, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##nz(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##nz, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##o(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##o, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##p(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##p, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##pe(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##pe, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##po(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##po, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##s(const _Op0_& o0, const _Op1_& o1) noexcept { return 
emit(_Code_##s, o0, o1); } \ + ASMJIT_INLINE HLInst* _Inst_##z(const _Op0_& o0, const _Op1_& o1) noexcept { return emit(_Code_##z, o0, o1); } #define INST_3x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) noexcept { \ return emit(_Code_, o0, o1, o2); \ } #define INST_3i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) noexcept { \ return emit(_Code_, o0, o1, o2); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2) noexcept { \ return emit(_Code_, o0, o1, o2); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2) noexcept { \ return emit(_Code_, o0, o1, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2) noexcept { \ return emit(_Code_, o0, o1, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2) noexcept { \ return emit(_Code_, o0, o1, o2); \ } #define INST_3ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2) noexcept { \ return emit(_Code_, o0, o1, o2); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int o1, int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int o1, int o2) noexcept { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, o2); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, unsigned int o1, unsigned int o2) noexcept { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, int64_t o1, int64_t o2) noexcept { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, static_cast(o2)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, uint64_t o1, uint64_t o2) noexcept { \ Imm o1Imm(o1); \ return emit(_Code_, o0, o1Imm, o2); \ } #define INST_4x(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) noexcept { \ return emit(_Code_, o0, o1, o2, o3); \ } #define INST_4i(_Inst_, _Code_, _Op0_, _Op1_, _Op2_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) noexcept { \ return emit(_Code_, o0, o1, o2, o3); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int o3) noexcept { \ return emit(_Code_, o0, o1, o2, o3); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, unsigned int o3) noexcept { \ return emit(_Code_, o0, o1, o2, static_cast(o3)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, int64_t o3) noexcept { \ return emit(_Code_, o0, o1, o2, static_cast(o3)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, uint64_t o3) noexcept { \ return emit(_Code_, o0, o1, o2, o3); \ } #define INST_4ii(_Inst_, _Code_, _Op0_, _Op1_, _Op2_, _Op3_) \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, const _Op2_& o2, const _Op3_& o3) noexcept { \ return emit(_Code_, o0, o1, o2, o3); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int o2, int o3) noexcept { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, o3); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, unsigned int o2, unsigned int o3) noexcept { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, static_cast(o3)); \ } \ /*! 
\overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, int64_t o2, int64_t o3) noexcept { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, static_cast(o3)); \ } \ /*! \overload */ \ - ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) { \ + ASMJIT_INLINE HLInst* _Inst_(const _Op0_& o0, const _Op1_& o1, uint64_t o2, uint64_t o3) noexcept { \ Imm o2Imm(o2); \ return emit(_Code_, o0, o1, o2Imm, o3); \ } diff --git a/src/asmjit/x86/x86compilercontext.cpp b/src/asmjit/x86/x86compilercontext.cpp index e271cb6..3e4d89d 100644 --- a/src/asmjit/x86/x86compilercontext.cpp +++ b/src/asmjit/x86/x86compilercontext.cpp @@ -13,12 +13,11 @@ // [Dependencies - AsmJit] #include "../base/containers.h" +#include "../base/cpuinfo.h" #include "../base/utils.h" #include "../x86/x86assembler.h" #include "../x86/x86compiler.h" #include "../x86/x86compilercontext_p.h" -#include "../x86/x86cpuinfo.h" -#include "../x86/x86scheduler_p.h" // [Api-Begin] #include "../apibegin.h" @@ -38,9 +37,9 @@ static Error X86Context_translateOperands(X86Context* self, Operand* opList, uin // Getting `VarClass` is the only safe operation when dealing with denormalized // `varType`. Any other property would require to map vType to the architecture // specific type. 
-static ASMJIT_INLINE uint32_t x86VarTypeToClass(uint32_t vType) { +static ASMJIT_INLINE uint32_t x86VarTypeToClass(uint32_t vType) noexcept { ASMJIT_ASSERT(vType < kX86VarTypeCount); - return _x86VarInfo[vType].getClass(); + return _x86VarInfo[vType].getRegClass(); } // ============================================================================ @@ -58,7 +57,7 @@ static void X86Context_annotateVariable(X86Context* self, } else { sb.appendChar('v'); - sb.appendUInt(vd->getId() & kOperandIdNum); + sb.appendUInt(vd->getId() & Operand::kIdIndexMask); } } @@ -114,7 +113,7 @@ static void X86Context_annotateOperand(X86Context* self, sb.appendChar(prefix); // TODO: Enable again: - // if ((loggerOptions & (1 << kLoggerOptionHexDisplacement)) != 0 && dispOffset > 9) { + // if ((loggerOptions & (Logger::kOptionHexDisplacement)) != 0 && dispOffset > 9) { // sb.appendString("0x", 2); // base = 16; // } @@ -128,7 +127,7 @@ static void X86Context_annotateOperand(X86Context* self, int64_t val = i->getInt64(); /* - if ((loggerOptions & (1 << kLoggerOptionHexImmediate)) && static_cast(val) > 9) + if ((loggerOptions & (1 << Logger::kOptionHexImmediate)) && static_cast(val) > 9) sb.appendUInt(static_cast(val), 16); else*/ sb.appendInt(val, 10); @@ -161,7 +160,7 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr StringBuilderTmp<256> sb; switch (node_->getType()) { - case kHLNodeTypeAlign: { + case HLNode::kTypeAlign: { HLAlign* node = static_cast(node_); sb.appendFormat(".align %u (%s)", node->getOffset(), @@ -169,19 +168,19 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr break; } - case kHLNodeTypeData: { + case HLNode::kTypeData: { HLData* node = static_cast(node_); sb.appendFormat(".embed (%u bytes)", node->getSize()); break; } - case kHLNodeTypeComment: { + case HLNode::kTypeComment: { HLComment* node = static_cast(node_); sb.appendFormat("; %s", node->getComment()); break; } - case kHLNodeTypeHint: { + case 
HLNode::kTypeHint: { HLHint* node = static_cast(node_); static const char* hint[16] = { "alloc", @@ -195,7 +194,7 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr break; } - case kHLNodeTypeLabel: { + case HLNode::kTypeLabel: { HLLabel* node = static_cast(node_); sb.appendFormat("L%u: (NumRefs=%u)", node->getLabelId(), @@ -203,38 +202,38 @@ static void X86Context_traceNode(X86Context* self, HLNode* node_, const char* pr break; } - case kHLNodeTypeInst: { + case HLNode::kTypeInst: { HLInst* node = static_cast(node_); X86Context_annotateInstruction(self, sb, node->getInstId(), node->getOpList(), node->getOpCount()); break; } - case kHLNodeTypeFunc: { + case HLNode::kTypeFunc: { HLFunc* node = static_cast(node_); sb.appendFormat("[func]"); break; } - case kHLNodeTypeSentinel: { + case HLNode::kTypeSentinel: { HLSentinel* node = static_cast(node_); sb.appendFormat("[end]"); break; } - case kHLNodeTypeRet: { + case HLNode::kTypeRet: { HLRet* node = static_cast(node_); sb.appendFormat("[ret]"); break; } - case kHLNodeTypeCall: { + case HLNode::kTypeCall: { HLCall* node = static_cast(node_); sb.appendFormat("[call]"); break; } - case kHLNodeTypeCallArg: { + case HLNode::kTypeCallArg: { HLCallArg* node = static_cast(node_); sb.appendFormat("[sarg]"); break; @@ -2006,14 +2005,14 @@ static ASMJIT_INLINE Error X86Context_insertHLCallArg( sArgCount++; } - const X86VarInfo& sInfo = _x86VarInfo[sType]; - uint32_t sClass = sInfo.getClass(); + const VarInfo& sInfo = _x86VarInfo[sType]; + uint32_t sClass = sInfo.getRegClass(); if (X86Context_mustConvertSArg(self, aType, sType)) { uint32_t cType = X86Context_typeOfConvertedSArg(self, aType, sType); - const X86VarInfo& cInfo = _x86VarInfo[cType]; - uint32_t cClass = cInfo.getClass(); + const VarInfo& cInfo = _x86VarInfo[cType]; + uint32_t cClass = cInfo.getRegClass(); while (++i < sArgCount) { sArgData = &sArgList[i]; @@ -2027,7 +2026,7 @@ static ASMJIT_INLINE Error X86Context_insertHLCallArg( 
return kErrorOk; } - VarData* cVd = compiler->_newVd(cType, cInfo.getSize(), cInfo.getClass(), nullptr); + VarData* cVd = compiler->_newVd(cInfo, nullptr); if (cVd == nullptr) return kErrorNoHeapMemory; @@ -2289,15 +2288,15 @@ _NextGroup: // [Align/Embed] // ---------------------------------------------------------------------- - case kHLNodeTypeAlign: - case kHLNodeTypeData: + case HLNode::kTypeAlign: + case HLNode::kTypeData: break; // ---------------------------------------------------------------------- // [Hint] // ---------------------------------------------------------------------- - case kHLNodeTypeHint: { + case HLNode::kTypeHint: { HLHint* node = static_cast(node_); VI_BEGIN(); @@ -2351,7 +2350,7 @@ _NextGroup: compiler->removeNode(cur); cur = static_cast(node->getNext()); - if (cur == nullptr || cur->getType() != kHLNodeTypeHint || cur->getHint() != kVarHintAlloc) + if (cur == nullptr || cur->getType() != HLNode::kTypeHint || cur->getHint() != kVarHintAlloc) break; } @@ -2389,7 +2388,7 @@ _NextGroup: // [Target] // ---------------------------------------------------------------------- - case kHLNodeTypeLabel: { + case HLNode::kTypeLabel: { if (node_ == func->getExitNode()) { ASMJIT_PROPAGATE_ERROR(addReturningNode(node_)); goto _NextGroup; @@ -2401,7 +2400,7 @@ _NextGroup: // [Inst] // ---------------------------------------------------------------------- - case kHLNodeTypeInst: { + case HLNode::kTypeInst: { HLInst* node = static_cast(node_); uint32_t instId = node->getInstId(); @@ -2417,10 +2416,10 @@ _NextGroup: // Collect instruction flags and merge all 'VarAttr's. 
if (extendedInfo.isFp()) - flags |= kHLNodeFlagIsFp; + flags |= HLNode::kFlagIsFp; if (extendedInfo.isSpecial() && (special = X86SpecialInst_get(instId, opList, opCount)) != nullptr) - flags |= kHLNodeFlagIsSpecial; + flags |= HLNode::kFlagIsSpecial; uint32_t gpAllowedMask = 0xFFFFFFFF; @@ -2656,11 +2655,11 @@ _NextGroup: if (jTarget->isFetched()) { uint32_t jTargetFlowId = jTarget->getFlowId(); - // Update kHLNodeFlagIsTaken flag to true if this is a conditional - // backward jump. This behavior can be overridden by using - // `kInstOptionTaken` when the instruction is created. + // Update HLNode::kFlagIsTaken flag to true if this is a + // conditional backward jump. This behavior can be overridden + // by using `kInstOptionTaken` when the instruction is created. if (!jNode->isTaken() && opCount == 1 && jTargetFlowId <= flowId) { - jNode->orFlags(kHLNodeFlagIsTaken); + jNode->orFlags(HLNode::kFlagIsTaken); } } else if (next->isFetched()) { @@ -2681,7 +2680,7 @@ _NextGroup: // [Func] // ---------------------------------------------------------------------- - case kHLNodeTypeFunc: { + case HLNode::kTypeFunc: { ASMJIT_ASSERT(node_ == func); X86FuncDecl* decl = func->getDecl(); @@ -2732,7 +2731,7 @@ _NextGroup: // [End] // ---------------------------------------------------------------------- - case kHLNodeTypeSentinel: { + case HLNode::kTypeSentinel: { ASMJIT_PROPAGATE_ERROR(addReturningNode(node_)); goto _NextGroup; } @@ -2741,7 +2740,7 @@ _NextGroup: // [Ret] // ---------------------------------------------------------------------- - case kHLNodeTypeRet: { + case HLNode::kTypeRet: { HLRet* node = static_cast(node_); ASMJIT_PROPAGATE_ERROR(addReturningNode(node)); @@ -2788,7 +2787,7 @@ _NextGroup: // [Call] // ---------------------------------------------------------------------- - case kHLNodeTypeCall: { + case HLNode::kTypeCall: { X86CallNode* node = static_cast(node_); X86FuncDecl* decl = node->getDecl(); @@ -2962,7 +2961,7 @@ Error X86Context::annotate() { 
uint32_t maxLen = 0; while (node_ != end) { if (node_->getComment() == nullptr) { - if (node_->getType() == kHLNodeTypeInst) { + if (node_->getType() == HLNode::kTypeInst) { HLInst* node = static_cast(node_); X86Context_annotateInstruction(this, sb, node->getInstId(), node->getOpList(), node->getOpCount()); @@ -3277,11 +3276,11 @@ ASMJIT_INLINE Error X86VarAlloc::run(HLNode* node_) { alloc(); // Translate node operands. - if (node_->getType() == kHLNodeTypeInst) { + if (node_->getType() == HLNode::kTypeInst) { HLInst* node = static_cast(node_); ASMJIT_PROPAGATE_ERROR(X86Context_translateOperands(_context, node->getOpList(), node->getOpCount())); } - else if (node_->getType() == kHLNodeTypeCallArg) { + else if (node_->getType() == HLNode::kTypeCallArg) { HLCallArg* node = static_cast(node_); X86CallNode* call = static_cast(node->getCall()); @@ -3895,11 +3894,11 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe _Advance: // Terminate if this is a return node. - if (node->hasFlag(kHLNodeFlagIsRet)) + if (node->hasFlag(HLNode::kFlagIsRet)) goto _Done; // Advance on non-conditional jump. - if (node->hasFlag(kHLNodeFlagIsJmp)) { + if (node->hasFlag(HLNode::kFlagIsJmp)) { // Stop on a jump that is not followed. node = static_cast(node)->getTarget(); if (node == nullptr) @@ -3908,7 +3907,7 @@ _Advance: } // Split flow on a conditional jump. - if (node->hasFlag(kHLNodeFlagIsJcc)) { + if (node->hasFlag(HLNode::kFlagIsJcc)) { // Put the next node on the stack and follow the target if possible. HLNode* next = node->getNext(); if (next != nullptr && gfIndex < kMaxGuessFlow) @@ -3972,15 +3971,15 @@ ASMJIT_INLINE uint32_t X86VarAlloc::guessAlloc(VarData* vd, uint32_t allocableRe break; // Stop on `HLSentinel` and `HLRet`. - if (node->hasFlag(kHLNodeFlagIsRet)) + if (node->hasFlag(HLNode::kFlagIsRet)) break; // Stop on conditional jump, we don't follow them. 
- if (node->hasFlag(kHLNodeFlagIsJcc)) + if (node->hasFlag(HLNode::kFlagIsJcc)) break; // Advance on non-conditional jump. - if (node->hasFlag(kHLNodeFlagIsJmp)) { + if (node->hasFlag(HLNode::kFlagIsJmp)) { node = static_cast(node)->getTarget(); // Stop on jump that is not followed. if (node == nullptr) @@ -4580,15 +4579,15 @@ ASMJIT_INLINE uint32_t X86CallAlloc::guessAlloc(VarData* vd, uint32_t allocableR HLNode* node = _node; for (i = 0; i < maxLookAhead; i++) { // Stop on 'HLRet' and 'HLSentinel. - if (node->hasFlag(kHLNodeFlagIsRet)) + if (node->hasFlag(HLNode::kFlagIsRet)) break; // Stop on conditional jump, we don't follow them. - if (node->hasFlag(kHLNodeFlagIsJcc)) + if (node->hasFlag(HLNode::kFlagIsJcc)) break; // Advance on non-conditional jump. - if (node->hasFlag(kHLNodeFlagIsJmp)) { + if (node->hasFlag(HLNode::kFlagIsJmp)) { node = static_cast(node)->getTarget(); // Stop on jump that is not followed. if (node == nullptr) @@ -4707,7 +4706,7 @@ ASMJIT_INLINE void X86CallAlloc::ret() { continue; VarData* vd = _compiler->getVdById(op->getId()); - uint32_t vf = _x86VarInfo[vd->getType()].getDesc(); + uint32_t vf = _x86VarInfo[vd->getType()].getFlags(); uint32_t regIndex = ret.getRegIndex(); switch (vd->getClass()) { @@ -4729,8 +4728,8 @@ ASMJIT_INLINE void X86CallAlloc::ret() { if (ret.getVarType() == kVarTypeFp32 || ret.getVarType() == kVarTypeFp64) { X86Mem m = _context->getVarMem(vd); m.setSize( - (vf & kVarFlagSp) ? 4 : - (vf & kVarFlagDp) ? 8 : + (vf & VarInfo::kFlagSP) ? 4 : + (vf & VarInfo::kFlagDP) ? 8 : (ret.getVarType() == kVarTypeFp32) ? 
4 : 8); _context->unuse(vd, kVarStateMem); @@ -4765,7 +4764,7 @@ static Error X86Context_translateOperands(X86Context* self, Operand* opList, uin ASMJIT_ASSERT(vd != nullptr); ASMJIT_ASSERT(vd->getRegIndex() != kInvalidReg); - op->_vreg.op = kOperandTypeReg; + op->_vreg.op = Operand::kTypeReg; op->_vreg.index = vd->getRegIndex(); } else if (op->isMem()) { @@ -5015,7 +5014,7 @@ static Error X86Context_patchFuncMem(X86Context* self, X86FuncNode* func, HLNode HLNode* node = func; do { - if (node->getType() == kHLNodeTypeInst) { + if (node->getType() == HLNode::kTypeInst) { HLInst* iNode = static_cast(node); if (iNode->hasMemOp()) { @@ -5336,10 +5335,10 @@ static Error X86Context_translateRet(X86Context* self, HLRet* rNode, HLLabel* ex VarData* vd = va.getVd(); X86Mem m(self->getVarMem(vd)); - uint32_t flags = _x86VarInfo[vd->getType()].getDesc(); + uint32_t flags = _x86VarInfo[vd->getType()].getFlags(); m.setSize( - (flags & kVarFlagSp) ? 4 : - (flags & kVarFlagDp) ? 8 : + (flags & VarInfo::kFlagSP) ? 4 : + (flags & VarInfo::kFlagDP) ? 8 : va.hasFlag(kVarAttrX86Fld4) ? 4 : 8); compiler->fld(m); @@ -5352,29 +5351,29 @@ static Error X86Context_translateRet(X86Context* self, HLRet* rNode, HLLabel* ex switch (node->getType()) { // If we have found an exit label we just return, there is no need to // emit jump to that. - case kHLNodeTypeLabel: + case HLNode::kTypeLabel: if (static_cast(node) == exitTarget) return kErrorOk; goto _EmitRet; - case kHLNodeTypeData: - case kHLNodeTypeInst: - case kHLNodeTypeCall: - case kHLNodeTypeRet: + case HLNode::kTypeData: + case HLNode::kTypeInst: + case HLNode::kTypeCall: + case HLNode::kTypeRet: goto _EmitRet; // Continue iterating. - case kHLNodeTypeComment: - case kHLNodeTypeAlign: - case kHLNodeTypeHint: + case HLNode::kTypeComment: + case HLNode::kTypeAlign: + case HLNode::kTypeHint: break; // Invalid node to be here. 
- case kHLNodeTypeFunc: + case HLNode::kTypeFunc: return self->getCompiler()->setLastError(kErrorInvalidState); // We can't go forward from here. - case kHLNodeTypeSentinel: + case HLNode::kTypeSentinel: return kErrorOk; } @@ -5413,7 +5412,7 @@ Error X86Context::translate() { for (;;) { while (node_->isTranslated()) { // Switch state if we went to the already translated node. - if (node_->getType() == kHLNodeTypeLabel) { + if (node_->getType() == HLNode::kTypeLabel) { HLLabel* node = static_cast(node_); compiler->_setCursor(node->getPrev()); switchState(node->getState()); @@ -5448,7 +5447,7 @@ _NextGroup: } next = node_->getNext(); - node_->orFlags(kHLNodeFlagIsTranslated); + node_->orFlags(HLNode::kFlagIsTranslated); ASMJIT_TSEC({ X86Context_traceNode(this, node_, "[T] "); @@ -5459,15 +5458,15 @@ _NextGroup: // [Align / Embed] // ---------------------------------------------------------------------- - case kHLNodeTypeAlign: - case kHLNodeTypeData: + case HLNode::kTypeAlign: + case HLNode::kTypeData: break; // ---------------------------------------------------------------------- // [Target] // ---------------------------------------------------------------------- - case kHLNodeTypeLabel: { + case HLNode::kTypeLabel: { HLLabel* node = static_cast(node_); ASMJIT_ASSERT(!node->hasState()); node->setState(saveState()); @@ -5478,9 +5477,9 @@ _NextGroup: // [Inst/Call/SArg/Ret] // ---------------------------------------------------------------------- - case kHLNodeTypeInst: - case kHLNodeTypeCall: - case kHLNodeTypeCallArg: + case HLNode::kTypeInst: + case HLNode::kTypeCall: + case HLNode::kTypeCallArg: // Update VarAttr's unuse flags based on liveness of the next node. 
if (!node_->isJcc()) { X86VarMap* map = static_cast(node_->getMap()); @@ -5500,14 +5499,14 @@ _NextGroup: } } - if (node_->getType() == kHLNodeTypeCall) { + if (node_->getType() == HLNode::kTypeCall) { ASMJIT_PROPAGATE_ERROR(cAlloc.run(static_cast(node_))); break; } ASMJIT_FALLTHROUGH; - case kHLNodeTypeHint: - case kHLNodeTypeRet: { + case HLNode::kTypeHint: + case HLNode::kTypeRet: { ASMJIT_PROPAGATE_ERROR(vAlloc.run(node_)); // Handle conditional/unconditional jump. @@ -5539,7 +5538,7 @@ _NextGroup: if (jTarget->isTranslated()) { if (jNext->isTranslated()) { - ASMJIT_ASSERT(jNext->getType() == kHLNodeTypeLabel); + ASMJIT_ASSERT(jNext->getType() == HLNode::kTypeLabel); compiler->_setCursor(node->getPrev()); intersectStates(jTarget->getState(), jNext->getState()); } @@ -5551,7 +5550,7 @@ _NextGroup: next = jNext; } else if (jNext->isTranslated()) { - ASMJIT_ASSERT(jNext->getType() == kHLNodeTypeLabel); + ASMJIT_ASSERT(jNext->getType() == HLNode::kTypeLabel); VarState* savedState = saveState(); node->setState(savedState); @@ -5577,7 +5576,7 @@ _NextGroup: // [Func] // ---------------------------------------------------------------------- - case kHLNodeTypeFunc: { + case HLNode::kTypeFunc: { ASMJIT_ASSERT(node_ == func); X86FuncDecl* decl = func->getDecl(); @@ -5626,7 +5625,7 @@ _NextGroup: // [End] // ---------------------------------------------------------------------- - case kHLNodeTypeSentinel: { + case HLNode::kTypeSentinel: { goto _NextGroup; } @@ -5648,115 +5647,6 @@ _Done: return kErrorOk; } -// ============================================================================ -// [asmjit::X86Context - Schedule] -// ============================================================================ - -Error X86Context::schedule() { - X86Compiler* compiler = getCompiler(); - X86Scheduler scheduler(compiler, - static_cast(compiler->getRuntime()->getCpuInfo())); - - HLNode* node_ = getFunc(); - HLNode* stop = getStop(); - ASMJIT_UNUSED(stop); // Unused in release mode. 
- - PodList::Link* jLink = _jccList.getFirst(); - - // -------------------------------------------------------------------------- - // [Loop] - // -------------------------------------------------------------------------- - -_Advance: - while (node_->isScheduled()) { -_NextGroup: - if (jLink == nullptr) - goto _Done; - - // We always go to the next instruction in the main loop so we have to - // jump to the `jcc` target here. - node_ = static_cast(jLink->getValue())->getTarget(); - jLink = jLink->getNext(); - } - - // Find interval that can be passed to scheduler. - for (;;) { - HLNode* schedStart = node_; - - for (;;) { - HLNode* next = node_->getNext(); - node_->orFlags(kHLNodeFlagIsScheduled); - - // Shouldn't happen here, investigate if hit. - ASMJIT_ASSERT(node_ != stop); - - uint32_t nodeType = node_->getType(); - if (nodeType != kHLNodeTypeInst) { - // If we didn't reach any instruction node we simply advance. In this - // case no informative nodes will be removed and everything else just - // skipped. - if (schedStart == node_) { - node_ = next; - if (nodeType == kHLNodeTypeSentinel || nodeType == kHLNodeTypeRet) - goto _NextGroup; - else - goto _Advance; - } - - // Remove informative nodes if we are in a middle of instruction stream. - // - // TODO: Shouldn't be there an option for this? Maybe it can be useful - // to stop if there is a comment or something. I'm not sure if it's - // good to always remove. - if (node_->isInformative()) { - compiler->removeNode(node_); - node_ = next; - continue; - } - - break; - } - - // Stop if `node_` is `jmp` or `jcc`. - if (node_->isJmpOrJcc()) - break; - - node_ = next; - } - - // If the stream is less than 3 instructions it will not be passed to - // scheduler. - if (schedStart != node_ && - schedStart->getNext() != node_ && - schedStart->getNext() != node_->getPrev()) { - - scheduler.run(schedStart, node_); - } - - // If node is `jmp` we follow it as well. 
- if (node_->isJmp()) { - node_ = static_cast(node_)->getTarget(); - if (node_ == nullptr) - goto _NextGroup; - else - goto _Advance; - } - - // Handle stop nodes. - { - uint32_t nodeType = node_->getType(); - if (nodeType == kHLNodeTypeSentinel || nodeType == kHLNodeTypeRet) - goto _NextGroup; - } - - node_ = node_->getNext(); - goto _Advance; - } - -_Done: - return kErrorOk; -} - // ============================================================================ // [asmjit::X86Context - Serialize] // ============================================================================ @@ -5779,39 +5669,39 @@ Error X86Context::serialize(Assembler* assembler_, HLNode* start, HLNode* stop) #endif // !ASMJIT_DISABLE_LOGGER switch (node_->getType()) { - case kHLNodeTypeAlign: { + case HLNode::kTypeAlign: { HLAlign* node = static_cast(node_); assembler->align(node->getAlignMode(), node->getOffset()); break; } - case kHLNodeTypeData: { + case HLNode::kTypeData: { HLData* node = static_cast(node_); assembler->embed(node->getData(), node->getSize()); break; } - case kHLNodeTypeComment: { + case HLNode::kTypeComment: { #if !defined(ASMJIT_DISABLE_LOGGER) HLComment* node = static_cast(node_); if (logger) - logger->logFormat(kLoggerStyleComment, + logger->logFormat(Logger::kStyleComment, "%s; %s\n", logger->getIndentation(), node->getComment()); #endif // !ASMJIT_DISABLE_LOGGER break; } - case kHLNodeTypeHint: { + case HLNode::kTypeHint: { break; } - case kHLNodeTypeLabel: { + case HLNode::kTypeLabel: { HLLabel* node = static_cast(node_); assembler->bind(node->getLabel()); break; } - case kHLNodeTypeInst: { + case HLNode::kTypeInst: { HLInst* node = static_cast(node_); uint32_t instId = node->getInstId(); @@ -5970,15 +5860,15 @@ Error X86Context::serialize(Assembler* assembler_, HLNode* start, HLNode* stop) // Function scope and return is translated to another nodes, no special // handling is required at this point. 
- case kHLNodeTypeFunc: - case kHLNodeTypeSentinel: - case kHLNodeTypeRet: { + case HLNode::kTypeFunc: + case HLNode::kTypeSentinel: + case HLNode::kTypeRet: { break; } // Function call adds nodes before and after, but it's required to emit // the call instruction by itself. - case kHLNodeTypeCall: { + case HLNode::kTypeCall: { X86CallNode* node = static_cast(node_); assembler->emit(kX86InstIdCall, node->_target, noOperand, noOperand); break; diff --git a/src/asmjit/x86/x86compilercontext_p.h b/src/asmjit/x86/x86compilercontext_p.h index 8da3b7d..442d6bf 100644 --- a/src/asmjit/x86/x86compilercontext_p.h +++ b/src/asmjit/x86/x86compilercontext_p.h @@ -662,12 +662,6 @@ struct X86Context : public Context { virtual Error translate(); - // -------------------------------------------------------------------------- - // [Schedule] - // -------------------------------------------------------------------------- - - virtual Error schedule(); - // -------------------------------------------------------------------------- // [Serialize] // -------------------------------------------------------------------------- diff --git a/src/asmjit/x86/x86cpuinfo.cpp b/src/asmjit/x86/x86cpuinfo.cpp deleted file mode 100644 index 56babb4..0000000 --- a/src/asmjit/x86/x86cpuinfo.cpp +++ /dev/null @@ -1,401 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Guard] -#include "../build.h" -#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64) - -// [Dependencies - AsmJit] -#include "../base/utils.h" -#include "../x86/x86cpuinfo.h" - -// 2009-02-05: Thanks to Mike Tajmajer for VC7.1 compiler support. It shouldn't -// affect x64 compilation, because x64 compiler starts with VS2005 (VC8.0). 
-#if defined(_MSC_VER) && (_MSC_VER >= 1400) -#include -#endif // _MSC_VER >= 1400 - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::X86CpuVendor] -// ============================================================================ - -struct X86CpuVendor { - uint32_t id; - char text[12]; -}; - -static const X86CpuVendor x86CpuVendorList[] = { - { kCpuVendorIntel , { 'G', 'e', 'n', 'u', 'i', 'n', 'e', 'I', 'n', 't', 'e', 'l' } }, - { kCpuVendorAMD , { 'A', 'u', 't', 'h', 'e', 'n', 't', 'i', 'c', 'A', 'M', 'D' } }, - { kCpuVendorVIA , { 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 , 'V', 'I', 'A', 0 } }, - { kCpuVendorVIA , { 'C', 'e', 'n', 't', 'a', 'u', 'r', 'H', 'a', 'u', 'l', 's' } } -}; - -static ASMJIT_INLINE bool x86CpuVendorEq(const X86CpuVendor& info, const char* vendorString) { - const uint32_t* a = reinterpret_cast(info.text); - const uint32_t* b = reinterpret_cast(vendorString); - - return (a[0] == b[0]) & (a[1] == b[1]) & (a[2] == b[2]); -} - -static ASMJIT_INLINE void x86SimplifyBrandString(char* s) { - // Always clear the current character in the buffer. It ensures that there - // is no garbage after the string zero terminator. - char* d = s; - - char prev = 0; - char curr = s[0]; - s[0] = '\0'; - - for (;;) { - if (curr == 0) - break; - - if (curr == ' ') { - if (prev == '@' || s[1] == ' ' || s[1] == '@') - goto _Skip; - } - - d[0] = curr; - d++; - prev = curr; - -_Skip: - curr = *++s; - s[0] = '\0'; - } - - d[0] = '\0'; -} - -// ============================================================================ -// [asmjit::X86CpuUtil] -// ============================================================================ - -// This is messy, I know. Cpuid is implemented as intrinsic in VS2005, but -// we should support other compilers as well. 
Main problem is that MS compilers -// in 64-bit mode not allows to use inline assembler, so we need intrinsic and -// we need also asm version. - -union X86XCR { - uint64_t value; - - struct { - uint32_t eax; - uint32_t edx; - }; -}; - -// callCpuId() and detectCpuInfo() for x86 and x64 platforms begins here. -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 -void X86CpuUtil::_docpuid(uint32_t inEcx, uint32_t inEax, X86CpuId* result) { - -#if defined(_MSC_VER) -// __cpuidex was introduced by VS2008-SP1. -# if _MSC_FULL_VER >= 150030729 - __cpuidex(reinterpret_cast(result->i), inEax, inEcx); -# elif ASMJIT_ARCH_X64 - // VS2008 or less, 64-bit mode - `__cpuidex` doesn't exist! However, 64-bit - // calling convention specifies parameter to be passed in ECX/RCX, so we may - // be lucky if compiler doesn't move the register, otherwise the result is - // undefined. - __cpuid(reinterpret_cast(result->i), inEax); -# else - uint32_t cpuid_eax = inEax; - uint32_t cpuid_ecx = inEcx; - uint32_t* cpuid_out = result->i; - - __asm { - mov eax, cpuid_eax - mov ecx, cpuid_ecx - mov edi, cpuid_out - cpuid - mov dword ptr[edi + 0], eax - mov dword ptr[edi + 4], ebx - mov dword ptr[edi + 8], ecx - mov dword ptr[edi + 12], edx - } -# endif - -#elif defined(__GNUC__) -// Note, patched to preserve ebx/rbx register which is used by GCC. 
-# if ASMJIT_ARCH_X86 -# define __myCpuId(inEax, inEcx, outEax, outEbx, outEcx, outEdx) \ - __asm__ __volatile__( \ - "mov %%ebx, %%edi\n" \ - "cpuid\n" \ - "xchg %%edi, %%ebx\n" \ - : "=a" (outEax), "=D" (outEbx), "=c" (outEcx), "=d" (outEdx) \ - : "a" (inEax), "c" (inEcx)) -# else -# define __myCpuId(inEax, inEcx, outEax, outEbx, outEcx, outEdx) \ - __asm__ __volatile__( \ - "mov %%rbx, %%rdi\n" \ - "cpuid\n" \ - "xchg %%rdi, %%rbx\n" \ - : "=a" (outEax), "=D" (outEbx), "=c" (outEcx), "=d" (outEdx) \ - : "a" (inEax), "c" (inEcx)) -# endif - __myCpuId(inEax, inEcx, result->eax, result->ebx, result->ecx, result->edx); - -#else -# error "[asmjit] X86CpuUtil::_docpuid() unimplemented!" -#endif -} - -static void callXGetBV(X86XCR* result, uint32_t inEcx) { -#if defined(_MSC_VER) -# if (_MSC_FULL_VER >= 160040219) // 2010SP1+ - result->value = _xgetbv(inEcx); -# else - result->value = 0; -# endif -#elif defined(__GNUC__) - unsigned int eax, edx; - - // Removed, because the world is not perfect: - // __asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(inEcx)); - __asm__ __volatile__(".byte 0x0F, 0x01, 0xd0" : "=a"(eax), "=d"(edx) : "c"(inEcx)); - - result->eax = eax; - result->edx = edx; -#else - result->value = 0; -#endif // COMPILER -} - -void X86CpuUtil::detect(X86CpuInfo* cpuInfo) { - uint32_t i; - uint32_t maxBaseId; - - X86CpuId regs; - X86XCR xcr0; - xcr0.value = 0; - - // Clear everything except the '_size' member. - ::memset(reinterpret_cast(cpuInfo) + sizeof(uint32_t), - 0, sizeof(CpuInfo) - sizeof(uint32_t)); - - // Fill safe defaults. - cpuInfo->_hwThreadsCount = CpuInfo::detectHwThreadsCount(); - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x0] - // -------------------------------------------------------------------------- - - // Get vendor string/id. 
- callCpuId(®s, 0x0); - - maxBaseId = regs.eax; - ::memcpy(cpuInfo->_vendorString, ®s.ebx, 4); - ::memcpy(cpuInfo->_vendorString + 4, ®s.edx, 4); - ::memcpy(cpuInfo->_vendorString + 8, ®s.ecx, 4); - - for (i = 0; i < ASMJIT_ARRAY_SIZE(x86CpuVendorList); i++) { - if (x86CpuVendorEq(x86CpuVendorList[i], cpuInfo->_vendorString)) { - cpuInfo->_vendorId = x86CpuVendorList[i].id; - break; - } - } - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x1] - // -------------------------------------------------------------------------- - - if (maxBaseId >= 0x1) { - // Get feature flags in ECX/EDX and family/model in EAX. - callCpuId(®s, 0x1); - - // Fill family and model fields. - cpuInfo->_family = (regs.eax >> 8) & 0x0F; - cpuInfo->_model = (regs.eax >> 4) & 0x0F; - cpuInfo->_stepping = (regs.eax ) & 0x0F; - - // Use extended family and model fields. - if (cpuInfo->_family == 0x0F) { - cpuInfo->_family += ((regs.eax >> 20) & 0xFF); - cpuInfo->_model += ((regs.eax >> 16) & 0x0F) << 4; - } - - cpuInfo->_processorType = ((regs.eax >> 12) & 0x03); - cpuInfo->_brandIndex = ((regs.ebx ) & 0xFF); - cpuInfo->_flushCacheLineSize = ((regs.ebx >> 8) & 0xFF) * 8; - cpuInfo->_maxLogicalProcessors = ((regs.ebx >> 16) & 0xFF); - - if (regs.ecx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureSSE3); - if (regs.ecx & 0x00000002U) cpuInfo->addFeature(kX86CpuFeaturePCLMULQDQ); - if (regs.ecx & 0x00000008U) cpuInfo->addFeature(kX86CpuFeatureMONITOR); - if (regs.ecx & 0x00000200U) cpuInfo->addFeature(kX86CpuFeatureSSSE3); - if (regs.ecx & 0x00002000U) cpuInfo->addFeature(kX86CpuFeatureCMPXCHG16B); - if (regs.ecx & 0x00080000U) cpuInfo->addFeature(kX86CpuFeatureSSE4_1); - if (regs.ecx & 0x00100000U) cpuInfo->addFeature(kX86CpuFeatureSSE4_2); - if (regs.ecx & 0x00400000U) cpuInfo->addFeature(kX86CpuFeatureMOVBE); - if (regs.ecx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeaturePOPCNT); - if (regs.ecx & 0x02000000U) 
cpuInfo->addFeature(kX86CpuFeatureAESNI); - if (regs.ecx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureXSAVE); - if (regs.ecx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureXSAVE_OS); - if (regs.ecx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeatureRDRAND); - - if (regs.edx & 0x00000010U) cpuInfo->addFeature(kX86CpuFeatureRDTSC); - if (regs.edx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeatureCMPXCHG8B); - if (regs.edx & 0x00008000U) cpuInfo->addFeature(kX86CpuFeatureCMOV); - if (regs.edx & 0x00080000U) cpuInfo->addFeature(kX86CpuFeatureCLFLUSH); - if (regs.edx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeatureMMX); - if (regs.edx & 0x01000000U) cpuInfo->addFeature(kX86CpuFeatureFXSR); - if (regs.edx & 0x02000000U) cpuInfo->addFeature(kX86CpuFeatureSSE).addFeature(kX86CpuFeatureMMX2); - if (regs.edx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureSSE).addFeature(kX86CpuFeatureSSE2); - if (regs.edx & 0x10000000U) cpuInfo->addFeature(kX86CpuFeatureMT); - - // AMD sets Multithreading to ON if it has two or more cores. - if (cpuInfo->_hwThreadsCount == 1 && cpuInfo->_vendorId == kCpuVendorAMD && (regs.edx & 0x10000000U)) { - cpuInfo->_hwThreadsCount = 2; - } - - // Get the content of XCR0 if supported by CPU and enabled by OS. - if ((regs.ecx & 0x0C000000U) == 0x0C000000U) { - callXGetBV(&xcr0, 0); - } - - // Detect AVX+. - if (regs.ecx & 0x10000000U) { - // - XCR0[2:1] == 11b - // XMM & YMM states are enabled by OS. 
- if ((xcr0.eax & 0x00000006U) == 0x00000006U) { - cpuInfo->addFeature(kX86CpuFeatureAVX); - - if (regs.ecx & 0x00000800U) cpuInfo->addFeature(kX86CpuFeatureXOP); - if (regs.ecx & 0x00004000U) cpuInfo->addFeature(kX86CpuFeatureFMA3); - if (regs.ecx & 0x00010000U) cpuInfo->addFeature(kX86CpuFeatureFMA4); - if (regs.ecx & 0x20000000U) cpuInfo->addFeature(kX86CpuFeatureF16C); - } - } - } - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x7 ECX=0x0] - // -------------------------------------------------------------------------- - - // Detect new features if the processor supports CPUID-07. - bool maybeMPX = false; - if (maxBaseId >= 0x7) { - callCpuId(®s, 0x7); - - if (regs.ebx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureFSGSBASE); - if (regs.ebx & 0x00000008U) cpuInfo->addFeature(kX86CpuFeatureBMI); - if (regs.ebx & 0x00000010U) cpuInfo->addFeature(kX86CpuFeatureHLE); - if (regs.ebx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeatureBMI2); - if (regs.ebx & 0x00000200U) cpuInfo->addFeature(kX86CpuFeatureMOVSBSTOSB_OPT); - if (regs.ebx & 0x00000800U) cpuInfo->addFeature(kX86CpuFeatureRTM); - if (regs.ebx & 0x00004000U) maybeMPX = true; - if (regs.ebx & 0x00040000U) cpuInfo->addFeature(kX86CpuFeatureRDSEED); - if (regs.ebx & 0x00080000U) cpuInfo->addFeature(kX86CpuFeatureADX); - if (regs.ebx & 0x00800000U) cpuInfo->addFeature(kX86CpuFeatureCLFLUSH_OPT); - if (regs.ebx & 0x20000000U) cpuInfo->addFeature(kX86CpuFeatureSHA); - - if (regs.ecx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeaturePREFETCHWT1); - - // Detect AVX2. - if (cpuInfo->hasFeature(kX86CpuFeatureAVX)) { - if (regs.ebx & 0x00000020U) cpuInfo->addFeature(kX86CpuFeatureAVX2); - } - - // Detect AVX-512+. - if (regs.ebx & 0x00010000U) { - // - XCR0[2:1] == 11b - // XMM & YMM states are enabled by OS. - // - XCR0[7:5] == 111b - // Upper 256-bit of ZMM0-XMM15 and ZMM16-ZMM31 state are enabled by OS. 
- if ((xcr0.eax & 0x00000076U) == 0x00000076U) { - cpuInfo->addFeature(kX86CpuFeatureAVX512F); - - if (regs.ebx & 0x00020000U) cpuInfo->addFeature(kX86CpuFeatureAVX512DQ); - if (regs.ebx & 0x04000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512PF); - if (regs.ebx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512ER); - if (regs.ebx & 0x10000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512CD); - if (regs.ebx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512BW); - if (regs.ebx & 0x80000000U) cpuInfo->addFeature(kX86CpuFeatureAVX512VL); - } - } - } - - // -------------------------------------------------------------------------- - // [CPUID EAX=0xD, ECX=0x0] - // -------------------------------------------------------------------------- - - if (maxBaseId >= 0xD && maybeMPX) { - callCpuId(®s, 0xD); - - // Both CPUID result and XCR0 has to be enabled to have support for MPX. - if (((regs.eax & xcr0.eax) & 0x00000018U) == 0x00000018U) { - cpuInfo->addFeature(kX86CpuFeatureMPX); - } - } - - // -------------------------------------------------------------------------- - // [CPUID EAX=0x80000000] - // -------------------------------------------------------------------------- - - // Calling cpuid with 0x80000000 as the in argument gets the number of valid - // extended IDs. 
- callCpuId(®s, 0x80000000); - - uint32_t maxExtId = Utils::iMin(regs.eax, 0x80000004); - uint32_t* brand = reinterpret_cast(cpuInfo->_brandString); - - for (i = 0x80000001; i <= maxExtId; i++) { - callCpuId(®s, i); - - switch (i) { - case 0x80000001: - if (regs.ecx & 0x00000001U) cpuInfo->addFeature(kX86CpuFeatureLahfSahf); - if (regs.ecx & 0x00000020U) cpuInfo->addFeature(kX86CpuFeatureLZCNT); - if (regs.ecx & 0x00000040U) cpuInfo->addFeature(kX86CpuFeatureSSE4A); - if (regs.ecx & 0x00000080U) cpuInfo->addFeature(kX86CpuFeatureMSSE); - if (regs.ecx & 0x00000100U) cpuInfo->addFeature(kX86CpuFeaturePREFETCH); - - if (regs.edx & 0x00100000U) cpuInfo->addFeature(kX86CpuFeatureNX); - if (regs.edx & 0x00200000U) cpuInfo->addFeature(kX86CpuFeatureFXSR_OPT); - if (regs.edx & 0x00400000U) cpuInfo->addFeature(kX86CpuFeatureMMX2); - if (regs.edx & 0x08000000U) cpuInfo->addFeature(kX86CpuFeatureRDTSCP); - if (regs.edx & 0x40000000U) cpuInfo->addFeature(kX86CpuFeature3DNOW2).addFeature(kX86CpuFeatureMMX2); - if (regs.edx & 0x80000000U) cpuInfo->addFeature(kX86CpuFeature3DNOW); - break; - - case 0x80000002: - case 0x80000003: - case 0x80000004: - *brand++ = regs.eax; - *brand++ = regs.ebx; - *brand++ = regs.ecx; - *brand++ = regs.edx; - break; - - default: - // Additional features can be detected in the future. - break; - } - } - - // Simplify the brand string (remove unnecessary spaces to make printing prettier). - x86SimplifyBrandString(cpuInfo->_brandString); -} -#endif - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64 diff --git a/src/asmjit/x86/x86cpuinfo.h b/src/asmjit/x86/x86cpuinfo.h deleted file mode 100644 index a908027..0000000 --- a/src/asmjit/x86/x86cpuinfo.h +++ /dev/null @@ -1,273 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
- -// [Guard] -#ifndef _ASMJIT_X86_X86CPUINFO_H -#define _ASMJIT_X86_X86CPUINFO_H - -// [Dependencies - AsmJit] -#include "../base/cpuinfo.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [Forward Declarations] -// ============================================================================ - -struct X86CpuInfo; - -//! \addtogroup asmjit_x86 -//! \{ - -// ============================================================================ -// [asmjit::X86CpuFeature] -// ============================================================================ - -//! X86 CPU features. -ASMJIT_ENUM(X86CpuFeature) { - //! Cpu has Not-Execute-Bit. - kX86CpuFeatureNX = 0, - //! Cpu has multithreading. - kX86CpuFeatureMT, - //! Cpu has RDTSC. - kX86CpuFeatureRDTSC, - //! Cpu has RDTSCP. - kX86CpuFeatureRDTSCP, - //! Cpu has CMOV. - kX86CpuFeatureCMOV, - //! Cpu has CMPXCHG8B. - kX86CpuFeatureCMPXCHG8B, - //! Cpu has CMPXCHG16B (X64). - kX86CpuFeatureCMPXCHG16B, - //! Cpu has CLFUSH. - kX86CpuFeatureCLFLUSH, - //! Cpu has CLFUSH (Optimized). - kX86CpuFeatureCLFLUSH_OPT, - //! Cpu has PREFETCH. - kX86CpuFeaturePREFETCH, - //! Cpu has PREFETCHWT1. - kX86CpuFeaturePREFETCHWT1, - //! Cpu has LAHF/SAHF. - kX86CpuFeatureLahfSahf, - //! Cpu has FXSAVE/FXRSTOR. - kX86CpuFeatureFXSR, - //! Cpu has FXSAVE/FXRSTOR (Optimized). - kX86CpuFeatureFXSR_OPT, - //! Cpu has MMX. - kX86CpuFeatureMMX, - //! Cpu has extended MMX. - kX86CpuFeatureMMX2, - //! Cpu has 3dNow! - kX86CpuFeature3DNOW, - //! Cpu has enchanced 3dNow! - kX86CpuFeature3DNOW2, - //! Cpu has SSE. - kX86CpuFeatureSSE, - //! Cpu has SSE2. - kX86CpuFeatureSSE2, - //! Cpu has SSE3. - kX86CpuFeatureSSE3, - //! Cpu has SSSE3. - kX86CpuFeatureSSSE3, - //! Cpu has SSE4.A. - kX86CpuFeatureSSE4A, - //! Cpu has SSE4.1. - kX86CpuFeatureSSE4_1, - //! Cpu has SSE4.2. - kX86CpuFeatureSSE4_2, - //! Cpu has Misaligned SSE (MSSE). 
- kX86CpuFeatureMSSE, - //! Cpu has MONITOR and MWAIT. - kX86CpuFeatureMONITOR, - //! Cpu has MOVBE. - kX86CpuFeatureMOVBE, - //! Cpu has POPCNT. - kX86CpuFeaturePOPCNT, - //! Cpu has LZCNT. - kX86CpuFeatureLZCNT, - //! Cpu has AESNI. - kX86CpuFeatureAESNI, - //! Cpu has PCLMULQDQ. - kX86CpuFeaturePCLMULQDQ, - //! Cpu has RDRAND. - kX86CpuFeatureRDRAND, - //! Cpu has RDSEED. - kX86CpuFeatureRDSEED, - //! Cpu has SHA-1 and SHA-256. - kX86CpuFeatureSHA, - //! Cpu has XSAVE support - XSAVE/XRSTOR, XSETBV/XGETBV, and XCR0. - kX86CpuFeatureXSAVE, - //! OS has enabled XSAVE, you can call XGETBV to get value of XCR0. - kX86CpuFeatureXSAVE_OS, - //! Cpu has AVX. - kX86CpuFeatureAVX, - //! Cpu has AVX2. - kX86CpuFeatureAVX2, - //! Cpu has F16C. - kX86CpuFeatureF16C, - //! Cpu has FMA3. - kX86CpuFeatureFMA3, - //! Cpu has FMA4. - kX86CpuFeatureFMA4, - //! Cpu has XOP. - kX86CpuFeatureXOP, - //! Cpu has BMI. - kX86CpuFeatureBMI, - //! Cpu has BMI2. - kX86CpuFeatureBMI2, - //! Cpu has HLE. - kX86CpuFeatureHLE, - //! Cpu has RTM. - kX86CpuFeatureRTM, - //! Cpu has ADX. - kX86CpuFeatureADX, - //! Cpu has MPX (Memory Protection Extensions). - kX86CpuFeatureMPX, - //! Cpu has FSGSBASE. - kX86CpuFeatureFSGSBASE, - //! Cpu has optimized REP MOVSB/STOSB. - kX86CpuFeatureMOVSBSTOSB_OPT, - - //! Cpu has AVX-512F (Foundation). - kX86CpuFeatureAVX512F, - //! Cpu has AVX-512CD (Conflict Detection). - kX86CpuFeatureAVX512CD, - //! Cpu has AVX-512PF (Prefetch Instructions). - kX86CpuFeatureAVX512PF, - //! Cpu has AVX-512ER (Exponential and Reciprocal Instructions). - kX86CpuFeatureAVX512ER, - //! Cpu has AVX-512DQ (DWord/QWord). - kX86CpuFeatureAVX512DQ, - //! Cpu has AVX-512BW (Byte/Word). - kX86CpuFeatureAVX512BW, - //! Cpu has AVX VL (Vector Length Excensions). - kX86CpuFeatureAVX512VL, - - //! Count of X86/X64 Cpu features. 
- kX86CpuFeatureCount -}; - -// ============================================================================ -// [asmjit::X86CpuId] -// ============================================================================ - -//! X86/X64 CPUID output. -union X86CpuId { - //! EAX/EBX/ECX/EDX output. - uint32_t i[4]; - - struct { - //! EAX output. - uint32_t eax; - //! EBX output. - uint32_t ebx; - //! ECX output. - uint32_t ecx; - //! EDX output. - uint32_t edx; - }; -}; - -// ============================================================================ -// [asmjit::X86CpuUtil] -// ============================================================================ - -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 -//! CPU utilities available only if the host processor is X86/X64. -struct X86CpuUtil { - //! \internal - //! - //! Designed to support VS2008 and less in 64-bit mode, even if this compiler - //! doesn't have `__cpuidex` intrinsic. - ASMJIT_API static void _docpuid(uint32_t inEcx, uint32_t inEax, X86CpuId* out); - - //! Get the result of calling CPUID instruction to `out`. - static ASMJIT_INLINE void callCpuId(X86CpuId* out, uint32_t inEax, uint32_t inEcx = 0) { - return _docpuid(inEcx, inEax, out); - } - - //! Detect the Host CPU. 
- ASMJIT_API static void detect(X86CpuInfo* cpuInfo); -}; -#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - -// ============================================================================ -// [asmjit::X86CpuInfo] -// ============================================================================ - -struct X86CpuInfo : public CpuInfo { - ASMJIT_NO_COPY(X86CpuInfo) - - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - ASMJIT_INLINE X86CpuInfo(); - - // -------------------------------------------------------------------------- - // [Accessors] - // -------------------------------------------------------------------------- - - //! Get processor type. - ASMJIT_INLINE uint32_t getProcessorType() const { - return _processorType; - } - - //! Get brand index. - ASMJIT_INLINE uint32_t getBrandIndex() const { - return _brandIndex; - } - - //! Get flush cache line size. - ASMJIT_INLINE uint32_t getFlushCacheLineSize() const { - return _flushCacheLineSize; - } - - //! Get maximum logical processors count. - ASMJIT_INLINE uint32_t getMaxLogicalProcessors() const { - return _maxLogicalProcessors; - } - - // -------------------------------------------------------------------------- - // [Statics] - // -------------------------------------------------------------------------- - -#if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - //! Get global instance of `X86CpuInfo`. - static ASMJIT_INLINE const X86CpuInfo* getHost() { - return static_cast(CpuInfo::getHost()); - } -#endif // ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Processor type. - uint32_t _processorType; - //! Brand index. - uint32_t _brandIndex; - //! Flush cache line size in bytes. - uint32_t _flushCacheLineSize; - //! 
Maximum number of addressable IDs for logical processors. - uint32_t _maxLogicalProcessors; -}; - -ASMJIT_INLINE X86CpuInfo::X86CpuInfo() : - CpuInfo(sizeof(X86CpuInfo)) {} - -//! \} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // _ASMJIT_X86_X86CPUINFO_H diff --git a/src/asmjit/x86/x86inst.cpp b/src/asmjit/x86/x86inst.cpp index 37ee8c8..6a5fca8 100644 --- a/src/asmjit/x86/x86inst.cpp +++ b/src/asmjit/x86/x86inst.cpp @@ -98,7 +98,7 @@ ASMJIT_ENUM(X86InstFlagsInternal) { // [Macros] // ============================================================================ -#if !defined(ASMJIT_DISABLE_NAMES) +#if !defined(ASMJIT_DISABLE_TEXT) # define INST_NAME_INDEX(_Code_) _Code_##_NameIndex #else # define INST_NAME_INDEX(_Code_) 0 @@ -159,7 +159,7 @@ ASMJIT_ENUM(X86InstFlagsInternal) { // ${X86InstData:Begin} // Automatically generated, do not edit. -#if !defined(ASMJIT_DISABLE_NAMES) +#if !defined(ASMJIT_DISABLE_TEXT) const char _x86InstName[] = "\0" "adc\0" @@ -2027,7 +2027,7 @@ enum X86InstData_NameIndex { kX86InstIdXsaveopt64_NameIndex = 6215, kX86InstIdXsetbv_NameIndex = 6226 }; -#endif // !ASMJIT_DISABLE_NAMES +#endif // !ASMJIT_DISABLE_TEXT // Automatically generated, do not edit. 
const X86InstExtendedInfo _x86InstExtendedInfo[] = { @@ -2172,7 +2172,6 @@ const X86InstExtendedInfo _x86InstExtendedInfo[] = { { Enc(ExtExtract) , 0 , 8 , 0x00, 0x00, 0, { O(Gd)|O(Gb)|O(Mem), O(Xmm) , U , U , U }, F(Move) , U }, { Enc(ExtExtract) , 0 , 8 , 0x00, 0x00, 0, { O(GdMem) , O(Xmm) , U , U , U }, F(Move) , U }, { Enc(ExtExtrW) , 0 , 8 , 0x00, 0x00, 0, { O(GdMem) , O(MmXmm) , U , U , U }, F(Move) , O_000F3A(15,U,_,_,_) }, - { Enc(3dNow) , 0 , 8 , 0x00, 0x00, 0, { O(Mm) , O(MmMem) , U , U , U }, F(Move) , U }, { Enc(3dNow) , 0 , 0 , 0x00, 0x00, 0, { O(Mm) , O(MmMem) , U , U , U }, F(None) , U }, { Enc(ExtRmi) , 0 , 0 , 0x00, 0x00, 0, { O(Xmm) , O(GdMem) , O(Imm) , U , U }, F(None) , U }, { Enc(ExtRmi) , 0 , 0 , 0x00, 0x00, 0, { O(Xmm) , O(GqMem) , O(Imm) , U , U }, F(None) , U }, @@ -2705,23 +2704,23 @@ enum X86InstData_ExtendedIndex { kX86InstIdPextrw_ExtendedIndex = 140, kX86InstIdPf2id_ExtendedIndex = 141, kX86InstIdPf2iw_ExtendedIndex = 141, - kX86InstIdPfacc_ExtendedIndex = 142, - kX86InstIdPfadd_ExtendedIndex = 142, - kX86InstIdPfcmpeq_ExtendedIndex = 142, - kX86InstIdPfcmpge_ExtendedIndex = 142, - kX86InstIdPfcmpgt_ExtendedIndex = 142, - kX86InstIdPfmax_ExtendedIndex = 142, - kX86InstIdPfmin_ExtendedIndex = 142, - kX86InstIdPfmul_ExtendedIndex = 142, - kX86InstIdPfnacc_ExtendedIndex = 142, - kX86InstIdPfpnacc_ExtendedIndex = 142, + kX86InstIdPfacc_ExtendedIndex = 141, + kX86InstIdPfadd_ExtendedIndex = 141, + kX86InstIdPfcmpeq_ExtendedIndex = 141, + kX86InstIdPfcmpge_ExtendedIndex = 141, + kX86InstIdPfcmpgt_ExtendedIndex = 141, + kX86InstIdPfmax_ExtendedIndex = 141, + kX86InstIdPfmin_ExtendedIndex = 141, + kX86InstIdPfmul_ExtendedIndex = 141, + kX86InstIdPfnacc_ExtendedIndex = 141, + kX86InstIdPfpnacc_ExtendedIndex = 141, kX86InstIdPfrcp_ExtendedIndex = 141, - kX86InstIdPfrcpit1_ExtendedIndex = 142, - kX86InstIdPfrcpit2_ExtendedIndex = 142, - kX86InstIdPfrsqit1_ExtendedIndex = 142, - kX86InstIdPfrsqrt_ExtendedIndex = 142, - 
kX86InstIdPfsub_ExtendedIndex = 142, - kX86InstIdPfsubr_ExtendedIndex = 142, + kX86InstIdPfrcpit1_ExtendedIndex = 141, + kX86InstIdPfrcpit2_ExtendedIndex = 141, + kX86InstIdPfrsqit1_ExtendedIndex = 141, + kX86InstIdPfrsqrt_ExtendedIndex = 141, + kX86InstIdPfsub_ExtendedIndex = 141, + kX86InstIdPfsubr_ExtendedIndex = 141, kX86InstIdPhaddd_ExtendedIndex = 136, kX86InstIdPhaddsw_ExtendedIndex = 136, kX86InstIdPhaddw_ExtendedIndex = 136, @@ -2731,10 +2730,10 @@ enum X86InstData_ExtendedIndex { kX86InstIdPhsubw_ExtendedIndex = 136, kX86InstIdPi2fd_ExtendedIndex = 141, kX86InstIdPi2fw_ExtendedIndex = 141, - kX86InstIdPinsrb_ExtendedIndex = 143, - kX86InstIdPinsrd_ExtendedIndex = 143, - kX86InstIdPinsrq_ExtendedIndex = 144, - kX86InstIdPinsrw_ExtendedIndex = 145, + kX86InstIdPinsrb_ExtendedIndex = 142, + kX86InstIdPinsrd_ExtendedIndex = 142, + kX86InstIdPinsrq_ExtendedIndex = 143, + kX86InstIdPinsrw_ExtendedIndex = 144, kX86InstIdPmaddubsw_ExtendedIndex = 136, kX86InstIdPmaddwd_ExtendedIndex = 136, kX86InstIdPmaxsb_ExtendedIndex = 3, @@ -2749,7 +2748,7 @@ enum X86InstData_ExtendedIndex { kX86InstIdPminub_ExtendedIndex = 136, kX86InstIdPminud_ExtendedIndex = 3, kX86InstIdPminuw_ExtendedIndex = 3, - kX86InstIdPmovmskb_ExtendedIndex = 146, + kX86InstIdPmovmskb_ExtendedIndex = 145, kX86InstIdPmovsxbd_ExtendedIndex = 36, kX86InstIdPmovsxbq_ExtendedIndex = 36, kX86InstIdPmovsxbw_ExtendedIndex = 36, @@ -2769,33 +2768,33 @@ enum X86InstData_ExtendedIndex { kX86InstIdPmulld_ExtendedIndex = 3, kX86InstIdPmullw_ExtendedIndex = 136, kX86InstIdPmuludq_ExtendedIndex = 136, - kX86InstIdPop_ExtendedIndex = 147, + kX86InstIdPop_ExtendedIndex = 146, kX86InstIdPopa_ExtendedIndex = 16, kX86InstIdPopcnt_ExtendedIndex = 9, - kX86InstIdPopf_ExtendedIndex = 148, + kX86InstIdPopf_ExtendedIndex = 147, kX86InstIdPor_ExtendedIndex = 136, - kX86InstIdPrefetch_ExtendedIndex = 149, + kX86InstIdPrefetch_ExtendedIndex = 148, kX86InstIdPrefetch3dNow_ExtendedIndex = 19, 
kX86InstIdPrefetchw3dNow_ExtendedIndex = 19, kX86InstIdPsadbw_ExtendedIndex = 136, kX86InstIdPshufb_ExtendedIndex = 136, - kX86InstIdPshufd_ExtendedIndex = 150, - kX86InstIdPshufhw_ExtendedIndex = 150, - kX86InstIdPshuflw_ExtendedIndex = 150, - kX86InstIdPshufw_ExtendedIndex = 151, + kX86InstIdPshufd_ExtendedIndex = 149, + kX86InstIdPshufhw_ExtendedIndex = 149, + kX86InstIdPshuflw_ExtendedIndex = 149, + kX86InstIdPshufw_ExtendedIndex = 150, kX86InstIdPsignb_ExtendedIndex = 136, kX86InstIdPsignd_ExtendedIndex = 136, kX86InstIdPsignw_ExtendedIndex = 136, - kX86InstIdPslld_ExtendedIndex = 152, - kX86InstIdPslldq_ExtendedIndex = 153, - kX86InstIdPsllq_ExtendedIndex = 154, - kX86InstIdPsllw_ExtendedIndex = 155, - kX86InstIdPsrad_ExtendedIndex = 156, - kX86InstIdPsraw_ExtendedIndex = 157, - kX86InstIdPsrld_ExtendedIndex = 158, - kX86InstIdPsrldq_ExtendedIndex = 159, - kX86InstIdPsrlq_ExtendedIndex = 160, - kX86InstIdPsrlw_ExtendedIndex = 161, + kX86InstIdPslld_ExtendedIndex = 151, + kX86InstIdPslldq_ExtendedIndex = 152, + kX86InstIdPsllq_ExtendedIndex = 153, + kX86InstIdPsllw_ExtendedIndex = 154, + kX86InstIdPsrad_ExtendedIndex = 155, + kX86InstIdPsraw_ExtendedIndex = 156, + kX86InstIdPsrld_ExtendedIndex = 157, + kX86InstIdPsrldq_ExtendedIndex = 158, + kX86InstIdPsrlq_ExtendedIndex = 159, + kX86InstIdPsrlw_ExtendedIndex = 160, kX86InstIdPsubb_ExtendedIndex = 136, kX86InstIdPsubd_ExtendedIndex = 136, kX86InstIdPsubq_ExtendedIndex = 136, @@ -2814,103 +2813,103 @@ enum X86InstData_ExtendedIndex { kX86InstIdPunpckldq_ExtendedIndex = 136, kX86InstIdPunpcklqdq_ExtendedIndex = 3, kX86InstIdPunpcklwd_ExtendedIndex = 136, - kX86InstIdPush_ExtendedIndex = 162, + kX86InstIdPush_ExtendedIndex = 161, kX86InstIdPusha_ExtendedIndex = 16, - kX86InstIdPushf_ExtendedIndex = 163, + kX86InstIdPushf_ExtendedIndex = 162, kX86InstIdPxor_ExtendedIndex = 136, - kX86InstIdRcl_ExtendedIndex = 164, + kX86InstIdRcl_ExtendedIndex = 163, kX86InstIdRcpps_ExtendedIndex = 36, 
kX86InstIdRcpss_ExtendedIndex = 41, - kX86InstIdRcr_ExtendedIndex = 164, - kX86InstIdRdfsbase_ExtendedIndex = 165, - kX86InstIdRdgsbase_ExtendedIndex = 165, - kX86InstIdRdrand_ExtendedIndex = 166, + kX86InstIdRcr_ExtendedIndex = 163, + kX86InstIdRdfsbase_ExtendedIndex = 164, + kX86InstIdRdgsbase_ExtendedIndex = 164, + kX86InstIdRdrand_ExtendedIndex = 165, kX86InstIdRdtsc_ExtendedIndex = 16, kX86InstIdRdtscp_ExtendedIndex = 16, - kX86InstIdRepLodsB_ExtendedIndex = 167, - kX86InstIdRepLodsD_ExtendedIndex = 167, - kX86InstIdRepLodsQ_ExtendedIndex = 167, - kX86InstIdRepLodsW_ExtendedIndex = 167, - kX86InstIdRepMovsB_ExtendedIndex = 168, - kX86InstIdRepMovsD_ExtendedIndex = 168, - kX86InstIdRepMovsQ_ExtendedIndex = 168, - kX86InstIdRepMovsW_ExtendedIndex = 168, - kX86InstIdRepStosB_ExtendedIndex = 167, - kX86InstIdRepStosD_ExtendedIndex = 167, - kX86InstIdRepStosQ_ExtendedIndex = 167, - kX86InstIdRepStosW_ExtendedIndex = 167, - kX86InstIdRepeCmpsB_ExtendedIndex = 169, - kX86InstIdRepeCmpsD_ExtendedIndex = 169, - kX86InstIdRepeCmpsQ_ExtendedIndex = 169, - kX86InstIdRepeCmpsW_ExtendedIndex = 169, - kX86InstIdRepeScasB_ExtendedIndex = 169, - kX86InstIdRepeScasD_ExtendedIndex = 169, - kX86InstIdRepeScasQ_ExtendedIndex = 169, - kX86InstIdRepeScasW_ExtendedIndex = 169, - kX86InstIdRepneCmpsB_ExtendedIndex = 169, - kX86InstIdRepneCmpsD_ExtendedIndex = 169, - kX86InstIdRepneCmpsQ_ExtendedIndex = 169, - kX86InstIdRepneCmpsW_ExtendedIndex = 169, - kX86InstIdRepneScasB_ExtendedIndex = 169, - kX86InstIdRepneScasD_ExtendedIndex = 169, - kX86InstIdRepneScasQ_ExtendedIndex = 169, - kX86InstIdRepneScasW_ExtendedIndex = 169, - kX86InstIdRet_ExtendedIndex = 170, - kX86InstIdRol_ExtendedIndex = 171, - kX86InstIdRor_ExtendedIndex = 171, - kX86InstIdRorx_ExtendedIndex = 172, - kX86InstIdRoundpd_ExtendedIndex = 150, - kX86InstIdRoundps_ExtendedIndex = 150, - kX86InstIdRoundsd_ExtendedIndex = 173, - kX86InstIdRoundss_ExtendedIndex = 174, + kX86InstIdRepLodsB_ExtendedIndex = 166, + 
kX86InstIdRepLodsD_ExtendedIndex = 166, + kX86InstIdRepLodsQ_ExtendedIndex = 166, + kX86InstIdRepLodsW_ExtendedIndex = 166, + kX86InstIdRepMovsB_ExtendedIndex = 167, + kX86InstIdRepMovsD_ExtendedIndex = 167, + kX86InstIdRepMovsQ_ExtendedIndex = 167, + kX86InstIdRepMovsW_ExtendedIndex = 167, + kX86InstIdRepStosB_ExtendedIndex = 166, + kX86InstIdRepStosD_ExtendedIndex = 166, + kX86InstIdRepStosQ_ExtendedIndex = 166, + kX86InstIdRepStosW_ExtendedIndex = 166, + kX86InstIdRepeCmpsB_ExtendedIndex = 168, + kX86InstIdRepeCmpsD_ExtendedIndex = 168, + kX86InstIdRepeCmpsQ_ExtendedIndex = 168, + kX86InstIdRepeCmpsW_ExtendedIndex = 168, + kX86InstIdRepeScasB_ExtendedIndex = 168, + kX86InstIdRepeScasD_ExtendedIndex = 168, + kX86InstIdRepeScasQ_ExtendedIndex = 168, + kX86InstIdRepeScasW_ExtendedIndex = 168, + kX86InstIdRepneCmpsB_ExtendedIndex = 168, + kX86InstIdRepneCmpsD_ExtendedIndex = 168, + kX86InstIdRepneCmpsQ_ExtendedIndex = 168, + kX86InstIdRepneCmpsW_ExtendedIndex = 168, + kX86InstIdRepneScasB_ExtendedIndex = 168, + kX86InstIdRepneScasD_ExtendedIndex = 168, + kX86InstIdRepneScasQ_ExtendedIndex = 168, + kX86InstIdRepneScasW_ExtendedIndex = 168, + kX86InstIdRet_ExtendedIndex = 169, + kX86InstIdRol_ExtendedIndex = 170, + kX86InstIdRor_ExtendedIndex = 170, + kX86InstIdRorx_ExtendedIndex = 171, + kX86InstIdRoundpd_ExtendedIndex = 149, + kX86InstIdRoundps_ExtendedIndex = 149, + kX86InstIdRoundsd_ExtendedIndex = 172, + kX86InstIdRoundss_ExtendedIndex = 173, kX86InstIdRsqrtps_ExtendedIndex = 36, kX86InstIdRsqrtss_ExtendedIndex = 41, - kX86InstIdSahf_ExtendedIndex = 175, - kX86InstIdSal_ExtendedIndex = 176, - kX86InstIdSar_ExtendedIndex = 176, - kX86InstIdSarx_ExtendedIndex = 177, + kX86InstIdSahf_ExtendedIndex = 174, + kX86InstIdSal_ExtendedIndex = 175, + kX86InstIdSar_ExtendedIndex = 175, + kX86InstIdSarx_ExtendedIndex = 176, kX86InstIdSbb_ExtendedIndex = 1, kX86InstIdScasB_ExtendedIndex = 30, kX86InstIdScasD_ExtendedIndex = 30, kX86InstIdScasQ_ExtendedIndex = 30, 
kX86InstIdScasW_ExtendedIndex = 31, - kX86InstIdSeta_ExtendedIndex = 178, - kX86InstIdSetae_ExtendedIndex = 179, - kX86InstIdSetb_ExtendedIndex = 179, - kX86InstIdSetbe_ExtendedIndex = 178, - kX86InstIdSetc_ExtendedIndex = 179, - kX86InstIdSete_ExtendedIndex = 180, - kX86InstIdSetg_ExtendedIndex = 181, - kX86InstIdSetge_ExtendedIndex = 182, - kX86InstIdSetl_ExtendedIndex = 182, - kX86InstIdSetle_ExtendedIndex = 181, - kX86InstIdSetna_ExtendedIndex = 178, - kX86InstIdSetnae_ExtendedIndex = 179, - kX86InstIdSetnb_ExtendedIndex = 179, - kX86InstIdSetnbe_ExtendedIndex = 178, - kX86InstIdSetnc_ExtendedIndex = 179, - kX86InstIdSetne_ExtendedIndex = 180, - kX86InstIdSetng_ExtendedIndex = 181, - kX86InstIdSetnge_ExtendedIndex = 182, - kX86InstIdSetnl_ExtendedIndex = 182, - kX86InstIdSetnle_ExtendedIndex = 181, - kX86InstIdSetno_ExtendedIndex = 183, - kX86InstIdSetnp_ExtendedIndex = 184, - kX86InstIdSetns_ExtendedIndex = 185, - kX86InstIdSetnz_ExtendedIndex = 180, - kX86InstIdSeto_ExtendedIndex = 183, - kX86InstIdSetp_ExtendedIndex = 184, - kX86InstIdSetpe_ExtendedIndex = 184, - kX86InstIdSetpo_ExtendedIndex = 184, - kX86InstIdSets_ExtendedIndex = 185, - kX86InstIdSetz_ExtendedIndex = 180, + kX86InstIdSeta_ExtendedIndex = 177, + kX86InstIdSetae_ExtendedIndex = 178, + kX86InstIdSetb_ExtendedIndex = 178, + kX86InstIdSetbe_ExtendedIndex = 177, + kX86InstIdSetc_ExtendedIndex = 178, + kX86InstIdSete_ExtendedIndex = 179, + kX86InstIdSetg_ExtendedIndex = 180, + kX86InstIdSetge_ExtendedIndex = 181, + kX86InstIdSetl_ExtendedIndex = 181, + kX86InstIdSetle_ExtendedIndex = 180, + kX86InstIdSetna_ExtendedIndex = 177, + kX86InstIdSetnae_ExtendedIndex = 178, + kX86InstIdSetnb_ExtendedIndex = 178, + kX86InstIdSetnbe_ExtendedIndex = 177, + kX86InstIdSetnc_ExtendedIndex = 178, + kX86InstIdSetne_ExtendedIndex = 179, + kX86InstIdSetng_ExtendedIndex = 180, + kX86InstIdSetnge_ExtendedIndex = 181, + kX86InstIdSetnl_ExtendedIndex = 181, + kX86InstIdSetnle_ExtendedIndex = 180, + 
kX86InstIdSetno_ExtendedIndex = 182, + kX86InstIdSetnp_ExtendedIndex = 183, + kX86InstIdSetns_ExtendedIndex = 184, + kX86InstIdSetnz_ExtendedIndex = 179, + kX86InstIdSeto_ExtendedIndex = 182, + kX86InstIdSetp_ExtendedIndex = 183, + kX86InstIdSetpe_ExtendedIndex = 183, + kX86InstIdSetpo_ExtendedIndex = 183, + kX86InstIdSets_ExtendedIndex = 184, + kX86InstIdSetz_ExtendedIndex = 179, kX86InstIdSfence_ExtendedIndex = 91, - kX86InstIdShl_ExtendedIndex = 176, - kX86InstIdShld_ExtendedIndex = 186, - kX86InstIdShlx_ExtendedIndex = 177, - kX86InstIdShr_ExtendedIndex = 176, - kX86InstIdShrd_ExtendedIndex = 187, - kX86InstIdShrx_ExtendedIndex = 177, + kX86InstIdShl_ExtendedIndex = 175, + kX86InstIdShld_ExtendedIndex = 185, + kX86InstIdShlx_ExtendedIndex = 176, + kX86InstIdShr_ExtendedIndex = 175, + kX86InstIdShrd_ExtendedIndex = 186, + kX86InstIdShrx_ExtendedIndex = 176, kX86InstIdShufpd_ExtendedIndex = 4, kX86InstIdShufps_ExtendedIndex = 4, kX86InstIdSqrtpd_ExtendedIndex = 36, @@ -2920,17 +2919,17 @@ enum X86InstData_ExtendedIndex { kX86InstIdStc_ExtendedIndex = 17, kX86InstIdStd_ExtendedIndex = 18, kX86InstIdStmxcsr_ExtendedIndex = 19, - kX86InstIdStosB_ExtendedIndex = 188, - kX86InstIdStosD_ExtendedIndex = 188, - kX86InstIdStosQ_ExtendedIndex = 188, - kX86InstIdStosW_ExtendedIndex = 189, + kX86InstIdStosB_ExtendedIndex = 187, + kX86InstIdStosD_ExtendedIndex = 187, + kX86InstIdStosQ_ExtendedIndex = 187, + kX86InstIdStosW_ExtendedIndex = 188, kX86InstIdSub_ExtendedIndex = 2, kX86InstIdSubpd_ExtendedIndex = 3, kX86InstIdSubps_ExtendedIndex = 3, kX86InstIdSubsd_ExtendedIndex = 3, kX86InstIdSubss_ExtendedIndex = 3, - kX86InstIdTest_ExtendedIndex = 190, - kX86InstIdTzcnt_ExtendedIndex = 191, + kX86InstIdTest_ExtendedIndex = 189, + kX86InstIdTzcnt_ExtendedIndex = 190, kX86InstIdUcomisd_ExtendedIndex = 34, kX86InstIdUcomiss_ExtendedIndex = 34, kX86InstIdUd2_ExtendedIndex = 48, @@ -2938,444 +2937,444 @@ enum X86InstData_ExtendedIndex { kX86InstIdUnpckhps_ExtendedIndex = 3, 
kX86InstIdUnpcklpd_ExtendedIndex = 3, kX86InstIdUnpcklps_ExtendedIndex = 3, - kX86InstIdVaddpd_ExtendedIndex = 192, - kX86InstIdVaddps_ExtendedIndex = 192, - kX86InstIdVaddsd_ExtendedIndex = 193, - kX86InstIdVaddss_ExtendedIndex = 193, - kX86InstIdVaddsubpd_ExtendedIndex = 192, - kX86InstIdVaddsubps_ExtendedIndex = 192, - kX86InstIdVaesdec_ExtendedIndex = 193, - kX86InstIdVaesdeclast_ExtendedIndex = 193, - kX86InstIdVaesenc_ExtendedIndex = 193, - kX86InstIdVaesenclast_ExtendedIndex = 193, - kX86InstIdVaesimc_ExtendedIndex = 194, - kX86InstIdVaeskeygenassist_ExtendedIndex = 195, - kX86InstIdVandnpd_ExtendedIndex = 192, - kX86InstIdVandnps_ExtendedIndex = 192, - kX86InstIdVandpd_ExtendedIndex = 192, - kX86InstIdVandps_ExtendedIndex = 192, - kX86InstIdVblendpd_ExtendedIndex = 196, - kX86InstIdVblendps_ExtendedIndex = 196, - kX86InstIdVblendvpd_ExtendedIndex = 197, - kX86InstIdVblendvps_ExtendedIndex = 197, - kX86InstIdVbroadcastf128_ExtendedIndex = 198, - kX86InstIdVbroadcasti128_ExtendedIndex = 198, - kX86InstIdVbroadcastsd_ExtendedIndex = 199, - kX86InstIdVbroadcastss_ExtendedIndex = 200, - kX86InstIdVcmppd_ExtendedIndex = 196, - kX86InstIdVcmpps_ExtendedIndex = 196, - kX86InstIdVcmpsd_ExtendedIndex = 201, - kX86InstIdVcmpss_ExtendedIndex = 201, - kX86InstIdVcomisd_ExtendedIndex = 194, - kX86InstIdVcomiss_ExtendedIndex = 194, - kX86InstIdVcvtdq2pd_ExtendedIndex = 200, - kX86InstIdVcvtdq2ps_ExtendedIndex = 202, - kX86InstIdVcvtpd2dq_ExtendedIndex = 203, - kX86InstIdVcvtpd2ps_ExtendedIndex = 203, - kX86InstIdVcvtph2ps_ExtendedIndex = 200, - kX86InstIdVcvtps2dq_ExtendedIndex = 202, - kX86InstIdVcvtps2pd_ExtendedIndex = 200, - kX86InstIdVcvtps2ph_ExtendedIndex = 204, - kX86InstIdVcvtsd2si_ExtendedIndex = 205, - kX86InstIdVcvtsd2ss_ExtendedIndex = 193, - kX86InstIdVcvtsi2sd_ExtendedIndex = 206, - kX86InstIdVcvtsi2ss_ExtendedIndex = 206, - kX86InstIdVcvtss2sd_ExtendedIndex = 193, - kX86InstIdVcvtss2si_ExtendedIndex = 205, - kX86InstIdVcvttpd2dq_ExtendedIndex = 207, - 
kX86InstIdVcvttps2dq_ExtendedIndex = 202, - kX86InstIdVcvttsd2si_ExtendedIndex = 205, - kX86InstIdVcvttss2si_ExtendedIndex = 205, - kX86InstIdVdivpd_ExtendedIndex = 192, - kX86InstIdVdivps_ExtendedIndex = 192, - kX86InstIdVdivsd_ExtendedIndex = 193, - kX86InstIdVdivss_ExtendedIndex = 193, - kX86InstIdVdppd_ExtendedIndex = 201, - kX86InstIdVdpps_ExtendedIndex = 196, - kX86InstIdVextractf128_ExtendedIndex = 208, - kX86InstIdVextracti128_ExtendedIndex = 208, - kX86InstIdVextractps_ExtendedIndex = 209, - kX86InstIdVfmadd132pd_ExtendedIndex = 192, - kX86InstIdVfmadd132ps_ExtendedIndex = 192, - kX86InstIdVfmadd132sd_ExtendedIndex = 193, - kX86InstIdVfmadd132ss_ExtendedIndex = 193, - kX86InstIdVfmadd213pd_ExtendedIndex = 192, - kX86InstIdVfmadd213ps_ExtendedIndex = 192, - kX86InstIdVfmadd213sd_ExtendedIndex = 193, - kX86InstIdVfmadd213ss_ExtendedIndex = 193, - kX86InstIdVfmadd231pd_ExtendedIndex = 192, - kX86InstIdVfmadd231ps_ExtendedIndex = 192, - kX86InstIdVfmadd231sd_ExtendedIndex = 193, - kX86InstIdVfmadd231ss_ExtendedIndex = 193, - kX86InstIdVfmaddpd_ExtendedIndex = 210, - kX86InstIdVfmaddps_ExtendedIndex = 210, - kX86InstIdVfmaddsd_ExtendedIndex = 211, - kX86InstIdVfmaddss_ExtendedIndex = 211, - kX86InstIdVfmaddsub132pd_ExtendedIndex = 192, - kX86InstIdVfmaddsub132ps_ExtendedIndex = 192, - kX86InstIdVfmaddsub213pd_ExtendedIndex = 192, - kX86InstIdVfmaddsub213ps_ExtendedIndex = 192, - kX86InstIdVfmaddsub231pd_ExtendedIndex = 192, - kX86InstIdVfmaddsub231ps_ExtendedIndex = 192, - kX86InstIdVfmaddsubpd_ExtendedIndex = 210, - kX86InstIdVfmaddsubps_ExtendedIndex = 210, - kX86InstIdVfmsub132pd_ExtendedIndex = 192, - kX86InstIdVfmsub132ps_ExtendedIndex = 192, - kX86InstIdVfmsub132sd_ExtendedIndex = 193, - kX86InstIdVfmsub132ss_ExtendedIndex = 193, - kX86InstIdVfmsub213pd_ExtendedIndex = 192, - kX86InstIdVfmsub213ps_ExtendedIndex = 192, - kX86InstIdVfmsub213sd_ExtendedIndex = 193, - kX86InstIdVfmsub213ss_ExtendedIndex = 193, - kX86InstIdVfmsub231pd_ExtendedIndex = 192, - 
kX86InstIdVfmsub231ps_ExtendedIndex = 192, - kX86InstIdVfmsub231sd_ExtendedIndex = 193, - kX86InstIdVfmsub231ss_ExtendedIndex = 193, - kX86InstIdVfmsubadd132pd_ExtendedIndex = 192, - kX86InstIdVfmsubadd132ps_ExtendedIndex = 192, - kX86InstIdVfmsubadd213pd_ExtendedIndex = 192, - kX86InstIdVfmsubadd213ps_ExtendedIndex = 192, - kX86InstIdVfmsubadd231pd_ExtendedIndex = 192, - kX86InstIdVfmsubadd231ps_ExtendedIndex = 192, - kX86InstIdVfmsubaddpd_ExtendedIndex = 210, - kX86InstIdVfmsubaddps_ExtendedIndex = 210, - kX86InstIdVfmsubpd_ExtendedIndex = 210, - kX86InstIdVfmsubps_ExtendedIndex = 210, - kX86InstIdVfmsubsd_ExtendedIndex = 211, - kX86InstIdVfmsubss_ExtendedIndex = 211, - kX86InstIdVfnmadd132pd_ExtendedIndex = 192, - kX86InstIdVfnmadd132ps_ExtendedIndex = 192, - kX86InstIdVfnmadd132sd_ExtendedIndex = 193, - kX86InstIdVfnmadd132ss_ExtendedIndex = 193, - kX86InstIdVfnmadd213pd_ExtendedIndex = 192, - kX86InstIdVfnmadd213ps_ExtendedIndex = 192, - kX86InstIdVfnmadd213sd_ExtendedIndex = 193, - kX86InstIdVfnmadd213ss_ExtendedIndex = 193, - kX86InstIdVfnmadd231pd_ExtendedIndex = 192, - kX86InstIdVfnmadd231ps_ExtendedIndex = 192, - kX86InstIdVfnmadd231sd_ExtendedIndex = 193, - kX86InstIdVfnmadd231ss_ExtendedIndex = 193, - kX86InstIdVfnmaddpd_ExtendedIndex = 210, - kX86InstIdVfnmaddps_ExtendedIndex = 210, - kX86InstIdVfnmaddsd_ExtendedIndex = 211, - kX86InstIdVfnmaddss_ExtendedIndex = 211, - kX86InstIdVfnmsub132pd_ExtendedIndex = 192, - kX86InstIdVfnmsub132ps_ExtendedIndex = 192, - kX86InstIdVfnmsub132sd_ExtendedIndex = 193, - kX86InstIdVfnmsub132ss_ExtendedIndex = 193, - kX86InstIdVfnmsub213pd_ExtendedIndex = 192, - kX86InstIdVfnmsub213ps_ExtendedIndex = 192, - kX86InstIdVfnmsub213sd_ExtendedIndex = 193, - kX86InstIdVfnmsub213ss_ExtendedIndex = 193, - kX86InstIdVfnmsub231pd_ExtendedIndex = 192, - kX86InstIdVfnmsub231ps_ExtendedIndex = 192, - kX86InstIdVfnmsub231sd_ExtendedIndex = 193, - kX86InstIdVfnmsub231ss_ExtendedIndex = 193, - kX86InstIdVfnmsubpd_ExtendedIndex = 210, - 
kX86InstIdVfnmsubps_ExtendedIndex = 210, - kX86InstIdVfnmsubsd_ExtendedIndex = 211, - kX86InstIdVfnmsubss_ExtendedIndex = 211, - kX86InstIdVfrczpd_ExtendedIndex = 212, - kX86InstIdVfrczps_ExtendedIndex = 212, - kX86InstIdVfrczsd_ExtendedIndex = 213, - kX86InstIdVfrczss_ExtendedIndex = 213, - kX86InstIdVgatherdpd_ExtendedIndex = 214, - kX86InstIdVgatherdps_ExtendedIndex = 214, - kX86InstIdVgatherqpd_ExtendedIndex = 214, - kX86InstIdVgatherqps_ExtendedIndex = 215, - kX86InstIdVhaddpd_ExtendedIndex = 192, - kX86InstIdVhaddps_ExtendedIndex = 192, - kX86InstIdVhsubpd_ExtendedIndex = 192, - kX86InstIdVhsubps_ExtendedIndex = 192, - kX86InstIdVinsertf128_ExtendedIndex = 216, - kX86InstIdVinserti128_ExtendedIndex = 216, - kX86InstIdVinsertps_ExtendedIndex = 201, - kX86InstIdVlddqu_ExtendedIndex = 217, - kX86InstIdVldmxcsr_ExtendedIndex = 218, - kX86InstIdVmaskmovdqu_ExtendedIndex = 219, - kX86InstIdVmaskmovpd_ExtendedIndex = 220, - kX86InstIdVmaskmovps_ExtendedIndex = 221, - kX86InstIdVmaxpd_ExtendedIndex = 192, - kX86InstIdVmaxps_ExtendedIndex = 192, - kX86InstIdVmaxsd_ExtendedIndex = 192, - kX86InstIdVmaxss_ExtendedIndex = 192, - kX86InstIdVminpd_ExtendedIndex = 192, - kX86InstIdVminps_ExtendedIndex = 192, - kX86InstIdVminsd_ExtendedIndex = 192, - kX86InstIdVminss_ExtendedIndex = 192, - kX86InstIdVmovapd_ExtendedIndex = 222, - kX86InstIdVmovaps_ExtendedIndex = 223, - kX86InstIdVmovd_ExtendedIndex = 224, - kX86InstIdVmovddup_ExtendedIndex = 202, - kX86InstIdVmovdqa_ExtendedIndex = 225, - kX86InstIdVmovdqu_ExtendedIndex = 226, - kX86InstIdVmovhlps_ExtendedIndex = 227, - kX86InstIdVmovhpd_ExtendedIndex = 228, - kX86InstIdVmovhps_ExtendedIndex = 229, - kX86InstIdVmovlhps_ExtendedIndex = 227, - kX86InstIdVmovlpd_ExtendedIndex = 230, - kX86InstIdVmovlps_ExtendedIndex = 231, - kX86InstIdVmovmskpd_ExtendedIndex = 232, - kX86InstIdVmovmskps_ExtendedIndex = 232, - kX86InstIdVmovntdq_ExtendedIndex = 233, - kX86InstIdVmovntdqa_ExtendedIndex = 217, - kX86InstIdVmovntpd_ExtendedIndex = 
233, - kX86InstIdVmovntps_ExtendedIndex = 233, - kX86InstIdVmovq_ExtendedIndex = 224, - kX86InstIdVmovsd_ExtendedIndex = 234, - kX86InstIdVmovshdup_ExtendedIndex = 202, - kX86InstIdVmovsldup_ExtendedIndex = 202, - kX86InstIdVmovss_ExtendedIndex = 235, - kX86InstIdVmovupd_ExtendedIndex = 236, - kX86InstIdVmovups_ExtendedIndex = 237, - kX86InstIdVmpsadbw_ExtendedIndex = 196, - kX86InstIdVmulpd_ExtendedIndex = 192, - kX86InstIdVmulps_ExtendedIndex = 192, - kX86InstIdVmulsd_ExtendedIndex = 192, - kX86InstIdVmulss_ExtendedIndex = 192, - kX86InstIdVorpd_ExtendedIndex = 192, - kX86InstIdVorps_ExtendedIndex = 192, - kX86InstIdVpabsb_ExtendedIndex = 202, - kX86InstIdVpabsd_ExtendedIndex = 202, - kX86InstIdVpabsw_ExtendedIndex = 202, - kX86InstIdVpackssdw_ExtendedIndex = 192, - kX86InstIdVpacksswb_ExtendedIndex = 192, - kX86InstIdVpackusdw_ExtendedIndex = 192, - kX86InstIdVpackuswb_ExtendedIndex = 192, - kX86InstIdVpaddb_ExtendedIndex = 192, - kX86InstIdVpaddd_ExtendedIndex = 192, - kX86InstIdVpaddq_ExtendedIndex = 192, - kX86InstIdVpaddsb_ExtendedIndex = 192, - kX86InstIdVpaddsw_ExtendedIndex = 192, - kX86InstIdVpaddusb_ExtendedIndex = 192, - kX86InstIdVpaddusw_ExtendedIndex = 192, - kX86InstIdVpaddw_ExtendedIndex = 192, - kX86InstIdVpalignr_ExtendedIndex = 196, - kX86InstIdVpand_ExtendedIndex = 192, - kX86InstIdVpandn_ExtendedIndex = 192, - kX86InstIdVpavgb_ExtendedIndex = 192, - kX86InstIdVpavgw_ExtendedIndex = 192, - kX86InstIdVpblendd_ExtendedIndex = 196, - kX86InstIdVpblendvb_ExtendedIndex = 238, - kX86InstIdVpblendw_ExtendedIndex = 196, - kX86InstIdVpbroadcastb_ExtendedIndex = 200, - kX86InstIdVpbroadcastd_ExtendedIndex = 200, - kX86InstIdVpbroadcastq_ExtendedIndex = 200, - kX86InstIdVpbroadcastw_ExtendedIndex = 200, - kX86InstIdVpclmulqdq_ExtendedIndex = 201, - kX86InstIdVpcmov_ExtendedIndex = 239, - kX86InstIdVpcmpeqb_ExtendedIndex = 192, - kX86InstIdVpcmpeqd_ExtendedIndex = 192, - kX86InstIdVpcmpeqq_ExtendedIndex = 192, - kX86InstIdVpcmpeqw_ExtendedIndex = 192, - 
kX86InstIdVpcmpestri_ExtendedIndex = 195, - kX86InstIdVpcmpestrm_ExtendedIndex = 195, - kX86InstIdVpcmpgtb_ExtendedIndex = 192, - kX86InstIdVpcmpgtd_ExtendedIndex = 192, - kX86InstIdVpcmpgtq_ExtendedIndex = 192, - kX86InstIdVpcmpgtw_ExtendedIndex = 192, - kX86InstIdVpcmpistri_ExtendedIndex = 195, - kX86InstIdVpcmpistrm_ExtendedIndex = 195, - kX86InstIdVpcomb_ExtendedIndex = 240, - kX86InstIdVpcomd_ExtendedIndex = 240, - kX86InstIdVpcomq_ExtendedIndex = 240, - kX86InstIdVpcomub_ExtendedIndex = 240, - kX86InstIdVpcomud_ExtendedIndex = 240, - kX86InstIdVpcomuq_ExtendedIndex = 240, - kX86InstIdVpcomuw_ExtendedIndex = 240, - kX86InstIdVpcomw_ExtendedIndex = 240, - kX86InstIdVperm2f128_ExtendedIndex = 241, - kX86InstIdVperm2i128_ExtendedIndex = 241, - kX86InstIdVpermd_ExtendedIndex = 242, - kX86InstIdVpermil2pd_ExtendedIndex = 243, - kX86InstIdVpermil2ps_ExtendedIndex = 243, - kX86InstIdVpermilpd_ExtendedIndex = 244, - kX86InstIdVpermilps_ExtendedIndex = 245, - kX86InstIdVpermpd_ExtendedIndex = 246, - kX86InstIdVpermps_ExtendedIndex = 242, - kX86InstIdVpermq_ExtendedIndex = 246, - kX86InstIdVpextrb_ExtendedIndex = 247, - kX86InstIdVpextrd_ExtendedIndex = 209, - kX86InstIdVpextrq_ExtendedIndex = 248, - kX86InstIdVpextrw_ExtendedIndex = 249, - kX86InstIdVpgatherdd_ExtendedIndex = 214, - kX86InstIdVpgatherdq_ExtendedIndex = 214, - kX86InstIdVpgatherqd_ExtendedIndex = 215, - kX86InstIdVpgatherqq_ExtendedIndex = 214, - kX86InstIdVphaddbd_ExtendedIndex = 213, - kX86InstIdVphaddbq_ExtendedIndex = 213, - kX86InstIdVphaddbw_ExtendedIndex = 213, - kX86InstIdVphaddd_ExtendedIndex = 192, - kX86InstIdVphadddq_ExtendedIndex = 213, - kX86InstIdVphaddsw_ExtendedIndex = 192, - kX86InstIdVphaddubd_ExtendedIndex = 213, - kX86InstIdVphaddubq_ExtendedIndex = 213, - kX86InstIdVphaddubw_ExtendedIndex = 213, - kX86InstIdVphaddudq_ExtendedIndex = 213, - kX86InstIdVphadduwd_ExtendedIndex = 213, - kX86InstIdVphadduwq_ExtendedIndex = 213, - kX86InstIdVphaddw_ExtendedIndex = 192, - 
kX86InstIdVphaddwd_ExtendedIndex = 213, - kX86InstIdVphaddwq_ExtendedIndex = 213, - kX86InstIdVphminposuw_ExtendedIndex = 194, - kX86InstIdVphsubbw_ExtendedIndex = 213, - kX86InstIdVphsubd_ExtendedIndex = 192, - kX86InstIdVphsubdq_ExtendedIndex = 213, - kX86InstIdVphsubsw_ExtendedIndex = 192, - kX86InstIdVphsubw_ExtendedIndex = 192, - kX86InstIdVphsubwd_ExtendedIndex = 213, - kX86InstIdVpinsrb_ExtendedIndex = 250, - kX86InstIdVpinsrd_ExtendedIndex = 251, - kX86InstIdVpinsrq_ExtendedIndex = 252, - kX86InstIdVpinsrw_ExtendedIndex = 253, - kX86InstIdVpmacsdd_ExtendedIndex = 254, - kX86InstIdVpmacsdqh_ExtendedIndex = 254, - kX86InstIdVpmacsdql_ExtendedIndex = 254, - kX86InstIdVpmacssdd_ExtendedIndex = 254, - kX86InstIdVpmacssdqh_ExtendedIndex = 254, - kX86InstIdVpmacssdql_ExtendedIndex = 254, - kX86InstIdVpmacsswd_ExtendedIndex = 254, - kX86InstIdVpmacssww_ExtendedIndex = 254, - kX86InstIdVpmacswd_ExtendedIndex = 254, - kX86InstIdVpmacsww_ExtendedIndex = 254, - kX86InstIdVpmadcsswd_ExtendedIndex = 254, - kX86InstIdVpmadcswd_ExtendedIndex = 254, - kX86InstIdVpmaddubsw_ExtendedIndex = 192, - kX86InstIdVpmaddwd_ExtendedIndex = 192, - kX86InstIdVpmaskmovd_ExtendedIndex = 255, - kX86InstIdVpmaskmovq_ExtendedIndex = 255, - kX86InstIdVpmaxsb_ExtendedIndex = 192, - kX86InstIdVpmaxsd_ExtendedIndex = 192, - kX86InstIdVpmaxsw_ExtendedIndex = 192, - kX86InstIdVpmaxub_ExtendedIndex = 192, - kX86InstIdVpmaxud_ExtendedIndex = 192, - kX86InstIdVpmaxuw_ExtendedIndex = 192, - kX86InstIdVpminsb_ExtendedIndex = 192, - kX86InstIdVpminsd_ExtendedIndex = 192, - kX86InstIdVpminsw_ExtendedIndex = 192, - kX86InstIdVpminub_ExtendedIndex = 192, - kX86InstIdVpminud_ExtendedIndex = 192, - kX86InstIdVpminuw_ExtendedIndex = 192, - kX86InstIdVpmovmskb_ExtendedIndex = 232, - kX86InstIdVpmovsxbd_ExtendedIndex = 202, - kX86InstIdVpmovsxbq_ExtendedIndex = 202, - kX86InstIdVpmovsxbw_ExtendedIndex = 202, - kX86InstIdVpmovsxdq_ExtendedIndex = 202, - kX86InstIdVpmovsxwd_ExtendedIndex = 202, - 
kX86InstIdVpmovsxwq_ExtendedIndex = 202, - kX86InstIdVpmovzxbd_ExtendedIndex = 202, - kX86InstIdVpmovzxbq_ExtendedIndex = 202, - kX86InstIdVpmovzxbw_ExtendedIndex = 202, - kX86InstIdVpmovzxdq_ExtendedIndex = 202, - kX86InstIdVpmovzxwd_ExtendedIndex = 202, - kX86InstIdVpmovzxwq_ExtendedIndex = 202, - kX86InstIdVpmuldq_ExtendedIndex = 192, - kX86InstIdVpmulhrsw_ExtendedIndex = 192, - kX86InstIdVpmulhuw_ExtendedIndex = 192, - kX86InstIdVpmulhw_ExtendedIndex = 192, - kX86InstIdVpmulld_ExtendedIndex = 192, - kX86InstIdVpmullw_ExtendedIndex = 192, - kX86InstIdVpmuludq_ExtendedIndex = 192, - kX86InstIdVpor_ExtendedIndex = 192, - kX86InstIdVpperm_ExtendedIndex = 256, - kX86InstIdVprotb_ExtendedIndex = 257, - kX86InstIdVprotd_ExtendedIndex = 258, - kX86InstIdVprotq_ExtendedIndex = 259, - kX86InstIdVprotw_ExtendedIndex = 260, - kX86InstIdVpsadbw_ExtendedIndex = 192, - kX86InstIdVpshab_ExtendedIndex = 261, - kX86InstIdVpshad_ExtendedIndex = 261, - kX86InstIdVpshaq_ExtendedIndex = 261, - kX86InstIdVpshaw_ExtendedIndex = 261, - kX86InstIdVpshlb_ExtendedIndex = 261, - kX86InstIdVpshld_ExtendedIndex = 261, - kX86InstIdVpshlq_ExtendedIndex = 261, - kX86InstIdVpshlw_ExtendedIndex = 261, - kX86InstIdVpshufb_ExtendedIndex = 192, - kX86InstIdVpshufd_ExtendedIndex = 262, - kX86InstIdVpshufhw_ExtendedIndex = 262, - kX86InstIdVpshuflw_ExtendedIndex = 262, - kX86InstIdVpsignb_ExtendedIndex = 192, - kX86InstIdVpsignd_ExtendedIndex = 192, - kX86InstIdVpsignw_ExtendedIndex = 192, - kX86InstIdVpslld_ExtendedIndex = 263, - kX86InstIdVpslldq_ExtendedIndex = 264, - kX86InstIdVpsllq_ExtendedIndex = 265, - kX86InstIdVpsllvd_ExtendedIndex = 192, - kX86InstIdVpsllvq_ExtendedIndex = 192, - kX86InstIdVpsllw_ExtendedIndex = 266, - kX86InstIdVpsrad_ExtendedIndex = 267, - kX86InstIdVpsravd_ExtendedIndex = 192, - kX86InstIdVpsraw_ExtendedIndex = 268, - kX86InstIdVpsrld_ExtendedIndex = 269, - kX86InstIdVpsrldq_ExtendedIndex = 264, - kX86InstIdVpsrlq_ExtendedIndex = 270, - kX86InstIdVpsrlvd_ExtendedIndex = 
192, - kX86InstIdVpsrlvq_ExtendedIndex = 192, - kX86InstIdVpsrlw_ExtendedIndex = 271, - kX86InstIdVpsubb_ExtendedIndex = 192, - kX86InstIdVpsubd_ExtendedIndex = 192, - kX86InstIdVpsubq_ExtendedIndex = 192, - kX86InstIdVpsubsb_ExtendedIndex = 192, - kX86InstIdVpsubsw_ExtendedIndex = 192, - kX86InstIdVpsubusb_ExtendedIndex = 192, - kX86InstIdVpsubusw_ExtendedIndex = 192, - kX86InstIdVpsubw_ExtendedIndex = 192, - kX86InstIdVptest_ExtendedIndex = 272, - kX86InstIdVpunpckhbw_ExtendedIndex = 192, - kX86InstIdVpunpckhdq_ExtendedIndex = 192, - kX86InstIdVpunpckhqdq_ExtendedIndex = 192, - kX86InstIdVpunpckhwd_ExtendedIndex = 192, - kX86InstIdVpunpcklbw_ExtendedIndex = 192, - kX86InstIdVpunpckldq_ExtendedIndex = 192, - kX86InstIdVpunpcklqdq_ExtendedIndex = 192, - kX86InstIdVpunpcklwd_ExtendedIndex = 192, - kX86InstIdVpxor_ExtendedIndex = 192, - kX86InstIdVrcpps_ExtendedIndex = 202, - kX86InstIdVrcpss_ExtendedIndex = 193, - kX86InstIdVroundpd_ExtendedIndex = 262, - kX86InstIdVroundps_ExtendedIndex = 262, - kX86InstIdVroundsd_ExtendedIndex = 201, - kX86InstIdVroundss_ExtendedIndex = 201, - kX86InstIdVrsqrtps_ExtendedIndex = 202, - kX86InstIdVrsqrtss_ExtendedIndex = 193, - kX86InstIdVshufpd_ExtendedIndex = 196, - kX86InstIdVshufps_ExtendedIndex = 196, - kX86InstIdVsqrtpd_ExtendedIndex = 202, - kX86InstIdVsqrtps_ExtendedIndex = 202, - kX86InstIdVsqrtsd_ExtendedIndex = 193, - kX86InstIdVsqrtss_ExtendedIndex = 193, - kX86InstIdVstmxcsr_ExtendedIndex = 218, - kX86InstIdVsubpd_ExtendedIndex = 192, - kX86InstIdVsubps_ExtendedIndex = 192, - kX86InstIdVsubsd_ExtendedIndex = 193, - kX86InstIdVsubss_ExtendedIndex = 193, - kX86InstIdVtestpd_ExtendedIndex = 273, - kX86InstIdVtestps_ExtendedIndex = 273, - kX86InstIdVucomisd_ExtendedIndex = 274, - kX86InstIdVucomiss_ExtendedIndex = 274, - kX86InstIdVunpckhpd_ExtendedIndex = 192, - kX86InstIdVunpckhps_ExtendedIndex = 192, - kX86InstIdVunpcklpd_ExtendedIndex = 192, - kX86InstIdVunpcklps_ExtendedIndex = 192, - kX86InstIdVxorpd_ExtendedIndex = 
192, - kX86InstIdVxorps_ExtendedIndex = 192, - kX86InstIdVzeroall_ExtendedIndex = 275, - kX86InstIdVzeroupper_ExtendedIndex = 275, - kX86InstIdWrfsbase_ExtendedIndex = 276, - kX86InstIdWrgsbase_ExtendedIndex = 276, - kX86InstIdXadd_ExtendedIndex = 277, - kX86InstIdXchg_ExtendedIndex = 278, + kX86InstIdVaddpd_ExtendedIndex = 191, + kX86InstIdVaddps_ExtendedIndex = 191, + kX86InstIdVaddsd_ExtendedIndex = 192, + kX86InstIdVaddss_ExtendedIndex = 192, + kX86InstIdVaddsubpd_ExtendedIndex = 191, + kX86InstIdVaddsubps_ExtendedIndex = 191, + kX86InstIdVaesdec_ExtendedIndex = 192, + kX86InstIdVaesdeclast_ExtendedIndex = 192, + kX86InstIdVaesenc_ExtendedIndex = 192, + kX86InstIdVaesenclast_ExtendedIndex = 192, + kX86InstIdVaesimc_ExtendedIndex = 193, + kX86InstIdVaeskeygenassist_ExtendedIndex = 194, + kX86InstIdVandnpd_ExtendedIndex = 191, + kX86InstIdVandnps_ExtendedIndex = 191, + kX86InstIdVandpd_ExtendedIndex = 191, + kX86InstIdVandps_ExtendedIndex = 191, + kX86InstIdVblendpd_ExtendedIndex = 195, + kX86InstIdVblendps_ExtendedIndex = 195, + kX86InstIdVblendvpd_ExtendedIndex = 196, + kX86InstIdVblendvps_ExtendedIndex = 196, + kX86InstIdVbroadcastf128_ExtendedIndex = 197, + kX86InstIdVbroadcasti128_ExtendedIndex = 197, + kX86InstIdVbroadcastsd_ExtendedIndex = 198, + kX86InstIdVbroadcastss_ExtendedIndex = 199, + kX86InstIdVcmppd_ExtendedIndex = 195, + kX86InstIdVcmpps_ExtendedIndex = 195, + kX86InstIdVcmpsd_ExtendedIndex = 200, + kX86InstIdVcmpss_ExtendedIndex = 200, + kX86InstIdVcomisd_ExtendedIndex = 193, + kX86InstIdVcomiss_ExtendedIndex = 193, + kX86InstIdVcvtdq2pd_ExtendedIndex = 199, + kX86InstIdVcvtdq2ps_ExtendedIndex = 201, + kX86InstIdVcvtpd2dq_ExtendedIndex = 202, + kX86InstIdVcvtpd2ps_ExtendedIndex = 202, + kX86InstIdVcvtph2ps_ExtendedIndex = 199, + kX86InstIdVcvtps2dq_ExtendedIndex = 201, + kX86InstIdVcvtps2pd_ExtendedIndex = 199, + kX86InstIdVcvtps2ph_ExtendedIndex = 203, + kX86InstIdVcvtsd2si_ExtendedIndex = 204, + kX86InstIdVcvtsd2ss_ExtendedIndex = 192, + 
kX86InstIdVcvtsi2sd_ExtendedIndex = 205, + kX86InstIdVcvtsi2ss_ExtendedIndex = 205, + kX86InstIdVcvtss2sd_ExtendedIndex = 192, + kX86InstIdVcvtss2si_ExtendedIndex = 204, + kX86InstIdVcvttpd2dq_ExtendedIndex = 206, + kX86InstIdVcvttps2dq_ExtendedIndex = 201, + kX86InstIdVcvttsd2si_ExtendedIndex = 204, + kX86InstIdVcvttss2si_ExtendedIndex = 204, + kX86InstIdVdivpd_ExtendedIndex = 191, + kX86InstIdVdivps_ExtendedIndex = 191, + kX86InstIdVdivsd_ExtendedIndex = 192, + kX86InstIdVdivss_ExtendedIndex = 192, + kX86InstIdVdppd_ExtendedIndex = 200, + kX86InstIdVdpps_ExtendedIndex = 195, + kX86InstIdVextractf128_ExtendedIndex = 207, + kX86InstIdVextracti128_ExtendedIndex = 207, + kX86InstIdVextractps_ExtendedIndex = 208, + kX86InstIdVfmadd132pd_ExtendedIndex = 191, + kX86InstIdVfmadd132ps_ExtendedIndex = 191, + kX86InstIdVfmadd132sd_ExtendedIndex = 192, + kX86InstIdVfmadd132ss_ExtendedIndex = 192, + kX86InstIdVfmadd213pd_ExtendedIndex = 191, + kX86InstIdVfmadd213ps_ExtendedIndex = 191, + kX86InstIdVfmadd213sd_ExtendedIndex = 192, + kX86InstIdVfmadd213ss_ExtendedIndex = 192, + kX86InstIdVfmadd231pd_ExtendedIndex = 191, + kX86InstIdVfmadd231ps_ExtendedIndex = 191, + kX86InstIdVfmadd231sd_ExtendedIndex = 192, + kX86InstIdVfmadd231ss_ExtendedIndex = 192, + kX86InstIdVfmaddpd_ExtendedIndex = 209, + kX86InstIdVfmaddps_ExtendedIndex = 209, + kX86InstIdVfmaddsd_ExtendedIndex = 210, + kX86InstIdVfmaddss_ExtendedIndex = 210, + kX86InstIdVfmaddsub132pd_ExtendedIndex = 191, + kX86InstIdVfmaddsub132ps_ExtendedIndex = 191, + kX86InstIdVfmaddsub213pd_ExtendedIndex = 191, + kX86InstIdVfmaddsub213ps_ExtendedIndex = 191, + kX86InstIdVfmaddsub231pd_ExtendedIndex = 191, + kX86InstIdVfmaddsub231ps_ExtendedIndex = 191, + kX86InstIdVfmaddsubpd_ExtendedIndex = 209, + kX86InstIdVfmaddsubps_ExtendedIndex = 209, + kX86InstIdVfmsub132pd_ExtendedIndex = 191, + kX86InstIdVfmsub132ps_ExtendedIndex = 191, + kX86InstIdVfmsub132sd_ExtendedIndex = 192, + kX86InstIdVfmsub132ss_ExtendedIndex = 192, + 
kX86InstIdVfmsub213pd_ExtendedIndex = 191, + kX86InstIdVfmsub213ps_ExtendedIndex = 191, + kX86InstIdVfmsub213sd_ExtendedIndex = 192, + kX86InstIdVfmsub213ss_ExtendedIndex = 192, + kX86InstIdVfmsub231pd_ExtendedIndex = 191, + kX86InstIdVfmsub231ps_ExtendedIndex = 191, + kX86InstIdVfmsub231sd_ExtendedIndex = 192, + kX86InstIdVfmsub231ss_ExtendedIndex = 192, + kX86InstIdVfmsubadd132pd_ExtendedIndex = 191, + kX86InstIdVfmsubadd132ps_ExtendedIndex = 191, + kX86InstIdVfmsubadd213pd_ExtendedIndex = 191, + kX86InstIdVfmsubadd213ps_ExtendedIndex = 191, + kX86InstIdVfmsubadd231pd_ExtendedIndex = 191, + kX86InstIdVfmsubadd231ps_ExtendedIndex = 191, + kX86InstIdVfmsubaddpd_ExtendedIndex = 209, + kX86InstIdVfmsubaddps_ExtendedIndex = 209, + kX86InstIdVfmsubpd_ExtendedIndex = 209, + kX86InstIdVfmsubps_ExtendedIndex = 209, + kX86InstIdVfmsubsd_ExtendedIndex = 210, + kX86InstIdVfmsubss_ExtendedIndex = 210, + kX86InstIdVfnmadd132pd_ExtendedIndex = 191, + kX86InstIdVfnmadd132ps_ExtendedIndex = 191, + kX86InstIdVfnmadd132sd_ExtendedIndex = 192, + kX86InstIdVfnmadd132ss_ExtendedIndex = 192, + kX86InstIdVfnmadd213pd_ExtendedIndex = 191, + kX86InstIdVfnmadd213ps_ExtendedIndex = 191, + kX86InstIdVfnmadd213sd_ExtendedIndex = 192, + kX86InstIdVfnmadd213ss_ExtendedIndex = 192, + kX86InstIdVfnmadd231pd_ExtendedIndex = 191, + kX86InstIdVfnmadd231ps_ExtendedIndex = 191, + kX86InstIdVfnmadd231sd_ExtendedIndex = 192, + kX86InstIdVfnmadd231ss_ExtendedIndex = 192, + kX86InstIdVfnmaddpd_ExtendedIndex = 209, + kX86InstIdVfnmaddps_ExtendedIndex = 209, + kX86InstIdVfnmaddsd_ExtendedIndex = 210, + kX86InstIdVfnmaddss_ExtendedIndex = 210, + kX86InstIdVfnmsub132pd_ExtendedIndex = 191, + kX86InstIdVfnmsub132ps_ExtendedIndex = 191, + kX86InstIdVfnmsub132sd_ExtendedIndex = 192, + kX86InstIdVfnmsub132ss_ExtendedIndex = 192, + kX86InstIdVfnmsub213pd_ExtendedIndex = 191, + kX86InstIdVfnmsub213ps_ExtendedIndex = 191, + kX86InstIdVfnmsub213sd_ExtendedIndex = 192, + kX86InstIdVfnmsub213ss_ExtendedIndex = 192, + 
kX86InstIdVfnmsub231pd_ExtendedIndex = 191, + kX86InstIdVfnmsub231ps_ExtendedIndex = 191, + kX86InstIdVfnmsub231sd_ExtendedIndex = 192, + kX86InstIdVfnmsub231ss_ExtendedIndex = 192, + kX86InstIdVfnmsubpd_ExtendedIndex = 209, + kX86InstIdVfnmsubps_ExtendedIndex = 209, + kX86InstIdVfnmsubsd_ExtendedIndex = 210, + kX86InstIdVfnmsubss_ExtendedIndex = 210, + kX86InstIdVfrczpd_ExtendedIndex = 211, + kX86InstIdVfrczps_ExtendedIndex = 211, + kX86InstIdVfrczsd_ExtendedIndex = 212, + kX86InstIdVfrczss_ExtendedIndex = 212, + kX86InstIdVgatherdpd_ExtendedIndex = 213, + kX86InstIdVgatherdps_ExtendedIndex = 213, + kX86InstIdVgatherqpd_ExtendedIndex = 213, + kX86InstIdVgatherqps_ExtendedIndex = 214, + kX86InstIdVhaddpd_ExtendedIndex = 191, + kX86InstIdVhaddps_ExtendedIndex = 191, + kX86InstIdVhsubpd_ExtendedIndex = 191, + kX86InstIdVhsubps_ExtendedIndex = 191, + kX86InstIdVinsertf128_ExtendedIndex = 215, + kX86InstIdVinserti128_ExtendedIndex = 215, + kX86InstIdVinsertps_ExtendedIndex = 200, + kX86InstIdVlddqu_ExtendedIndex = 216, + kX86InstIdVldmxcsr_ExtendedIndex = 217, + kX86InstIdVmaskmovdqu_ExtendedIndex = 218, + kX86InstIdVmaskmovpd_ExtendedIndex = 219, + kX86InstIdVmaskmovps_ExtendedIndex = 220, + kX86InstIdVmaxpd_ExtendedIndex = 191, + kX86InstIdVmaxps_ExtendedIndex = 191, + kX86InstIdVmaxsd_ExtendedIndex = 191, + kX86InstIdVmaxss_ExtendedIndex = 191, + kX86InstIdVminpd_ExtendedIndex = 191, + kX86InstIdVminps_ExtendedIndex = 191, + kX86InstIdVminsd_ExtendedIndex = 191, + kX86InstIdVminss_ExtendedIndex = 191, + kX86InstIdVmovapd_ExtendedIndex = 221, + kX86InstIdVmovaps_ExtendedIndex = 222, + kX86InstIdVmovd_ExtendedIndex = 223, + kX86InstIdVmovddup_ExtendedIndex = 201, + kX86InstIdVmovdqa_ExtendedIndex = 224, + kX86InstIdVmovdqu_ExtendedIndex = 225, + kX86InstIdVmovhlps_ExtendedIndex = 226, + kX86InstIdVmovhpd_ExtendedIndex = 227, + kX86InstIdVmovhps_ExtendedIndex = 228, + kX86InstIdVmovlhps_ExtendedIndex = 226, + kX86InstIdVmovlpd_ExtendedIndex = 229, + 
kX86InstIdVmovlps_ExtendedIndex = 230, + kX86InstIdVmovmskpd_ExtendedIndex = 231, + kX86InstIdVmovmskps_ExtendedIndex = 231, + kX86InstIdVmovntdq_ExtendedIndex = 232, + kX86InstIdVmovntdqa_ExtendedIndex = 216, + kX86InstIdVmovntpd_ExtendedIndex = 232, + kX86InstIdVmovntps_ExtendedIndex = 232, + kX86InstIdVmovq_ExtendedIndex = 223, + kX86InstIdVmovsd_ExtendedIndex = 233, + kX86InstIdVmovshdup_ExtendedIndex = 201, + kX86InstIdVmovsldup_ExtendedIndex = 201, + kX86InstIdVmovss_ExtendedIndex = 234, + kX86InstIdVmovupd_ExtendedIndex = 235, + kX86InstIdVmovups_ExtendedIndex = 236, + kX86InstIdVmpsadbw_ExtendedIndex = 195, + kX86InstIdVmulpd_ExtendedIndex = 191, + kX86InstIdVmulps_ExtendedIndex = 191, + kX86InstIdVmulsd_ExtendedIndex = 191, + kX86InstIdVmulss_ExtendedIndex = 191, + kX86InstIdVorpd_ExtendedIndex = 191, + kX86InstIdVorps_ExtendedIndex = 191, + kX86InstIdVpabsb_ExtendedIndex = 201, + kX86InstIdVpabsd_ExtendedIndex = 201, + kX86InstIdVpabsw_ExtendedIndex = 201, + kX86InstIdVpackssdw_ExtendedIndex = 191, + kX86InstIdVpacksswb_ExtendedIndex = 191, + kX86InstIdVpackusdw_ExtendedIndex = 191, + kX86InstIdVpackuswb_ExtendedIndex = 191, + kX86InstIdVpaddb_ExtendedIndex = 191, + kX86InstIdVpaddd_ExtendedIndex = 191, + kX86InstIdVpaddq_ExtendedIndex = 191, + kX86InstIdVpaddsb_ExtendedIndex = 191, + kX86InstIdVpaddsw_ExtendedIndex = 191, + kX86InstIdVpaddusb_ExtendedIndex = 191, + kX86InstIdVpaddusw_ExtendedIndex = 191, + kX86InstIdVpaddw_ExtendedIndex = 191, + kX86InstIdVpalignr_ExtendedIndex = 195, + kX86InstIdVpand_ExtendedIndex = 191, + kX86InstIdVpandn_ExtendedIndex = 191, + kX86InstIdVpavgb_ExtendedIndex = 191, + kX86InstIdVpavgw_ExtendedIndex = 191, + kX86InstIdVpblendd_ExtendedIndex = 195, + kX86InstIdVpblendvb_ExtendedIndex = 237, + kX86InstIdVpblendw_ExtendedIndex = 195, + kX86InstIdVpbroadcastb_ExtendedIndex = 199, + kX86InstIdVpbroadcastd_ExtendedIndex = 199, + kX86InstIdVpbroadcastq_ExtendedIndex = 199, + kX86InstIdVpbroadcastw_ExtendedIndex = 199, + 
kX86InstIdVpclmulqdq_ExtendedIndex = 200, + kX86InstIdVpcmov_ExtendedIndex = 238, + kX86InstIdVpcmpeqb_ExtendedIndex = 191, + kX86InstIdVpcmpeqd_ExtendedIndex = 191, + kX86InstIdVpcmpeqq_ExtendedIndex = 191, + kX86InstIdVpcmpeqw_ExtendedIndex = 191, + kX86InstIdVpcmpestri_ExtendedIndex = 194, + kX86InstIdVpcmpestrm_ExtendedIndex = 194, + kX86InstIdVpcmpgtb_ExtendedIndex = 191, + kX86InstIdVpcmpgtd_ExtendedIndex = 191, + kX86InstIdVpcmpgtq_ExtendedIndex = 191, + kX86InstIdVpcmpgtw_ExtendedIndex = 191, + kX86InstIdVpcmpistri_ExtendedIndex = 194, + kX86InstIdVpcmpistrm_ExtendedIndex = 194, + kX86InstIdVpcomb_ExtendedIndex = 239, + kX86InstIdVpcomd_ExtendedIndex = 239, + kX86InstIdVpcomq_ExtendedIndex = 239, + kX86InstIdVpcomub_ExtendedIndex = 239, + kX86InstIdVpcomud_ExtendedIndex = 239, + kX86InstIdVpcomuq_ExtendedIndex = 239, + kX86InstIdVpcomuw_ExtendedIndex = 239, + kX86InstIdVpcomw_ExtendedIndex = 239, + kX86InstIdVperm2f128_ExtendedIndex = 240, + kX86InstIdVperm2i128_ExtendedIndex = 240, + kX86InstIdVpermd_ExtendedIndex = 241, + kX86InstIdVpermil2pd_ExtendedIndex = 242, + kX86InstIdVpermil2ps_ExtendedIndex = 242, + kX86InstIdVpermilpd_ExtendedIndex = 243, + kX86InstIdVpermilps_ExtendedIndex = 244, + kX86InstIdVpermpd_ExtendedIndex = 245, + kX86InstIdVpermps_ExtendedIndex = 241, + kX86InstIdVpermq_ExtendedIndex = 245, + kX86InstIdVpextrb_ExtendedIndex = 246, + kX86InstIdVpextrd_ExtendedIndex = 208, + kX86InstIdVpextrq_ExtendedIndex = 247, + kX86InstIdVpextrw_ExtendedIndex = 248, + kX86InstIdVpgatherdd_ExtendedIndex = 213, + kX86InstIdVpgatherdq_ExtendedIndex = 213, + kX86InstIdVpgatherqd_ExtendedIndex = 214, + kX86InstIdVpgatherqq_ExtendedIndex = 213, + kX86InstIdVphaddbd_ExtendedIndex = 212, + kX86InstIdVphaddbq_ExtendedIndex = 212, + kX86InstIdVphaddbw_ExtendedIndex = 212, + kX86InstIdVphaddd_ExtendedIndex = 191, + kX86InstIdVphadddq_ExtendedIndex = 212, + kX86InstIdVphaddsw_ExtendedIndex = 191, + kX86InstIdVphaddubd_ExtendedIndex = 212, + 
kX86InstIdVphaddubq_ExtendedIndex = 212, + kX86InstIdVphaddubw_ExtendedIndex = 212, + kX86InstIdVphaddudq_ExtendedIndex = 212, + kX86InstIdVphadduwd_ExtendedIndex = 212, + kX86InstIdVphadduwq_ExtendedIndex = 212, + kX86InstIdVphaddw_ExtendedIndex = 191, + kX86InstIdVphaddwd_ExtendedIndex = 212, + kX86InstIdVphaddwq_ExtendedIndex = 212, + kX86InstIdVphminposuw_ExtendedIndex = 193, + kX86InstIdVphsubbw_ExtendedIndex = 212, + kX86InstIdVphsubd_ExtendedIndex = 191, + kX86InstIdVphsubdq_ExtendedIndex = 212, + kX86InstIdVphsubsw_ExtendedIndex = 191, + kX86InstIdVphsubw_ExtendedIndex = 191, + kX86InstIdVphsubwd_ExtendedIndex = 212, + kX86InstIdVpinsrb_ExtendedIndex = 249, + kX86InstIdVpinsrd_ExtendedIndex = 250, + kX86InstIdVpinsrq_ExtendedIndex = 251, + kX86InstIdVpinsrw_ExtendedIndex = 252, + kX86InstIdVpmacsdd_ExtendedIndex = 253, + kX86InstIdVpmacsdqh_ExtendedIndex = 253, + kX86InstIdVpmacsdql_ExtendedIndex = 253, + kX86InstIdVpmacssdd_ExtendedIndex = 253, + kX86InstIdVpmacssdqh_ExtendedIndex = 253, + kX86InstIdVpmacssdql_ExtendedIndex = 253, + kX86InstIdVpmacsswd_ExtendedIndex = 253, + kX86InstIdVpmacssww_ExtendedIndex = 253, + kX86InstIdVpmacswd_ExtendedIndex = 253, + kX86InstIdVpmacsww_ExtendedIndex = 253, + kX86InstIdVpmadcsswd_ExtendedIndex = 253, + kX86InstIdVpmadcswd_ExtendedIndex = 253, + kX86InstIdVpmaddubsw_ExtendedIndex = 191, + kX86InstIdVpmaddwd_ExtendedIndex = 191, + kX86InstIdVpmaskmovd_ExtendedIndex = 254, + kX86InstIdVpmaskmovq_ExtendedIndex = 254, + kX86InstIdVpmaxsb_ExtendedIndex = 191, + kX86InstIdVpmaxsd_ExtendedIndex = 191, + kX86InstIdVpmaxsw_ExtendedIndex = 191, + kX86InstIdVpmaxub_ExtendedIndex = 191, + kX86InstIdVpmaxud_ExtendedIndex = 191, + kX86InstIdVpmaxuw_ExtendedIndex = 191, + kX86InstIdVpminsb_ExtendedIndex = 191, + kX86InstIdVpminsd_ExtendedIndex = 191, + kX86InstIdVpminsw_ExtendedIndex = 191, + kX86InstIdVpminub_ExtendedIndex = 191, + kX86InstIdVpminud_ExtendedIndex = 191, + kX86InstIdVpminuw_ExtendedIndex = 191, + 
kX86InstIdVpmovmskb_ExtendedIndex = 231, + kX86InstIdVpmovsxbd_ExtendedIndex = 201, + kX86InstIdVpmovsxbq_ExtendedIndex = 201, + kX86InstIdVpmovsxbw_ExtendedIndex = 201, + kX86InstIdVpmovsxdq_ExtendedIndex = 201, + kX86InstIdVpmovsxwd_ExtendedIndex = 201, + kX86InstIdVpmovsxwq_ExtendedIndex = 201, + kX86InstIdVpmovzxbd_ExtendedIndex = 201, + kX86InstIdVpmovzxbq_ExtendedIndex = 201, + kX86InstIdVpmovzxbw_ExtendedIndex = 201, + kX86InstIdVpmovzxdq_ExtendedIndex = 201, + kX86InstIdVpmovzxwd_ExtendedIndex = 201, + kX86InstIdVpmovzxwq_ExtendedIndex = 201, + kX86InstIdVpmuldq_ExtendedIndex = 191, + kX86InstIdVpmulhrsw_ExtendedIndex = 191, + kX86InstIdVpmulhuw_ExtendedIndex = 191, + kX86InstIdVpmulhw_ExtendedIndex = 191, + kX86InstIdVpmulld_ExtendedIndex = 191, + kX86InstIdVpmullw_ExtendedIndex = 191, + kX86InstIdVpmuludq_ExtendedIndex = 191, + kX86InstIdVpor_ExtendedIndex = 191, + kX86InstIdVpperm_ExtendedIndex = 255, + kX86InstIdVprotb_ExtendedIndex = 256, + kX86InstIdVprotd_ExtendedIndex = 257, + kX86InstIdVprotq_ExtendedIndex = 258, + kX86InstIdVprotw_ExtendedIndex = 259, + kX86InstIdVpsadbw_ExtendedIndex = 191, + kX86InstIdVpshab_ExtendedIndex = 260, + kX86InstIdVpshad_ExtendedIndex = 260, + kX86InstIdVpshaq_ExtendedIndex = 260, + kX86InstIdVpshaw_ExtendedIndex = 260, + kX86InstIdVpshlb_ExtendedIndex = 260, + kX86InstIdVpshld_ExtendedIndex = 260, + kX86InstIdVpshlq_ExtendedIndex = 260, + kX86InstIdVpshlw_ExtendedIndex = 260, + kX86InstIdVpshufb_ExtendedIndex = 191, + kX86InstIdVpshufd_ExtendedIndex = 261, + kX86InstIdVpshufhw_ExtendedIndex = 261, + kX86InstIdVpshuflw_ExtendedIndex = 261, + kX86InstIdVpsignb_ExtendedIndex = 191, + kX86InstIdVpsignd_ExtendedIndex = 191, + kX86InstIdVpsignw_ExtendedIndex = 191, + kX86InstIdVpslld_ExtendedIndex = 262, + kX86InstIdVpslldq_ExtendedIndex = 263, + kX86InstIdVpsllq_ExtendedIndex = 264, + kX86InstIdVpsllvd_ExtendedIndex = 191, + kX86InstIdVpsllvq_ExtendedIndex = 191, + kX86InstIdVpsllw_ExtendedIndex = 265, + 
kX86InstIdVpsrad_ExtendedIndex = 266, + kX86InstIdVpsravd_ExtendedIndex = 191, + kX86InstIdVpsraw_ExtendedIndex = 267, + kX86InstIdVpsrld_ExtendedIndex = 268, + kX86InstIdVpsrldq_ExtendedIndex = 263, + kX86InstIdVpsrlq_ExtendedIndex = 269, + kX86InstIdVpsrlvd_ExtendedIndex = 191, + kX86InstIdVpsrlvq_ExtendedIndex = 191, + kX86InstIdVpsrlw_ExtendedIndex = 270, + kX86InstIdVpsubb_ExtendedIndex = 191, + kX86InstIdVpsubd_ExtendedIndex = 191, + kX86InstIdVpsubq_ExtendedIndex = 191, + kX86InstIdVpsubsb_ExtendedIndex = 191, + kX86InstIdVpsubsw_ExtendedIndex = 191, + kX86InstIdVpsubusb_ExtendedIndex = 191, + kX86InstIdVpsubusw_ExtendedIndex = 191, + kX86InstIdVpsubw_ExtendedIndex = 191, + kX86InstIdVptest_ExtendedIndex = 271, + kX86InstIdVpunpckhbw_ExtendedIndex = 191, + kX86InstIdVpunpckhdq_ExtendedIndex = 191, + kX86InstIdVpunpckhqdq_ExtendedIndex = 191, + kX86InstIdVpunpckhwd_ExtendedIndex = 191, + kX86InstIdVpunpcklbw_ExtendedIndex = 191, + kX86InstIdVpunpckldq_ExtendedIndex = 191, + kX86InstIdVpunpcklqdq_ExtendedIndex = 191, + kX86InstIdVpunpcklwd_ExtendedIndex = 191, + kX86InstIdVpxor_ExtendedIndex = 191, + kX86InstIdVrcpps_ExtendedIndex = 201, + kX86InstIdVrcpss_ExtendedIndex = 192, + kX86InstIdVroundpd_ExtendedIndex = 261, + kX86InstIdVroundps_ExtendedIndex = 261, + kX86InstIdVroundsd_ExtendedIndex = 200, + kX86InstIdVroundss_ExtendedIndex = 200, + kX86InstIdVrsqrtps_ExtendedIndex = 201, + kX86InstIdVrsqrtss_ExtendedIndex = 192, + kX86InstIdVshufpd_ExtendedIndex = 195, + kX86InstIdVshufps_ExtendedIndex = 195, + kX86InstIdVsqrtpd_ExtendedIndex = 201, + kX86InstIdVsqrtps_ExtendedIndex = 201, + kX86InstIdVsqrtsd_ExtendedIndex = 192, + kX86InstIdVsqrtss_ExtendedIndex = 192, + kX86InstIdVstmxcsr_ExtendedIndex = 217, + kX86InstIdVsubpd_ExtendedIndex = 191, + kX86InstIdVsubps_ExtendedIndex = 191, + kX86InstIdVsubsd_ExtendedIndex = 192, + kX86InstIdVsubss_ExtendedIndex = 192, + kX86InstIdVtestpd_ExtendedIndex = 272, + kX86InstIdVtestps_ExtendedIndex = 272, + 
kX86InstIdVucomisd_ExtendedIndex = 273, + kX86InstIdVucomiss_ExtendedIndex = 273, + kX86InstIdVunpckhpd_ExtendedIndex = 191, + kX86InstIdVunpckhps_ExtendedIndex = 191, + kX86InstIdVunpcklpd_ExtendedIndex = 191, + kX86InstIdVunpcklps_ExtendedIndex = 191, + kX86InstIdVxorpd_ExtendedIndex = 191, + kX86InstIdVxorps_ExtendedIndex = 191, + kX86InstIdVzeroall_ExtendedIndex = 274, + kX86InstIdVzeroupper_ExtendedIndex = 274, + kX86InstIdWrfsbase_ExtendedIndex = 275, + kX86InstIdWrgsbase_ExtendedIndex = 275, + kX86InstIdXadd_ExtendedIndex = 276, + kX86InstIdXchg_ExtendedIndex = 277, kX86InstIdXgetbv_ExtendedIndex = 16, kX86InstIdXor_ExtendedIndex = 2, kX86InstIdXorpd_ExtendedIndex = 3, kX86InstIdXorps_ExtendedIndex = 3, - kX86InstIdXrstor_ExtendedIndex = 279, - kX86InstIdXrstor64_ExtendedIndex = 279, - kX86InstIdXsave_ExtendedIndex = 279, - kX86InstIdXsave64_ExtendedIndex = 279, - kX86InstIdXsaveopt_ExtendedIndex = 279, - kX86InstIdXsaveopt64_ExtendedIndex = 279, + kX86InstIdXrstor_ExtendedIndex = 278, + kX86InstIdXrstor64_ExtendedIndex = 278, + kX86InstIdXsave_ExtendedIndex = 278, + kX86InstIdXsave64_ExtendedIndex = 278, + kX86InstIdXsaveopt_ExtendedIndex = 278, + kX86InstIdXsaveopt64_ExtendedIndex = 278, kX86InstIdXsetbv_ExtendedIndex = 16 }; // ${X86InstData:End} @@ -4518,14 +4517,14 @@ const uint32_t _x86CondToSetcc [20] = CC_TO_INST(kX86InstIdSet ); // [asmjit::X86Util] // ============================================================================ -#if !defined(ASMJIT_DISABLE_NAMES) +#if !defined(ASMJIT_DISABLE_TEXT) //! \internal //! //! Compare two instruction names. //! //! `a` is null terminated instruction name from `_x86InstName[]` table. //! `b` is non-null terminated instruction name passed to `getInstIdByName()`. 
-static ASMJIT_INLINE int X86Util_cmpInstName(const char* a, const char* b, size_t len) { +static ASMJIT_INLINE int X86Util_cmpInstName(const char* a, const char* b, size_t len) noexcept { for (size_t i = 0; i < len; i++) { int c = static_cast(static_cast(a[i])) - static_cast(static_cast(b[i])) ; @@ -4536,7 +4535,7 @@ static ASMJIT_INLINE int X86Util_cmpInstName(const char* a, const char* b, size_ return static_cast(a[len]); } -uint32_t X86Util::getInstIdByName(const char* name, size_t len) { +uint32_t X86Util::getInstIdByName(const char* name, size_t len) noexcept { if (name == nullptr) return kInstIdNone; @@ -4597,13 +4596,13 @@ uint32_t X86Util::getInstIdByName(const char* name, size_t len) { return kInstIdNone; } -#endif // ASMJIT_DISABLE_NAMES +#endif // ASMJIT_DISABLE_TEXT // ============================================================================ // [asmjit::X86Util - Test] // ============================================================================ -#if defined(ASMJIT_TEST) && !defined(ASMJIT_DISABLE_NAMES) +#if defined(ASMJIT_TEST) && !defined(ASMJIT_DISABLE_TEXT) UNIT(x86_inst_name) { // All known instructions should be matched. INFO("Matching all X86/X64 instructions."); @@ -4630,7 +4629,7 @@ UNIT(x86_inst_name) { EXPECT(X86Util::getInstIdByName("123xyz") == kInstIdNone, "Should return kInstIdNone for unknown instruction."); } -#endif // ASMJIT_TEST && !ASMJIT_DISABLE_NAMES +#endif // ASMJIT_TEST && !ASMJIT_DISABLE_TEXT } // asmjit namespace diff --git a/src/asmjit/x86/x86inst.h b/src/asmjit/x86/x86inst.h index 2c9ce72..249d74e 100644 --- a/src/asmjit/x86/x86inst.h +++ b/src/asmjit/x86/x86inst.h @@ -34,12 +34,12 @@ struct X86InstExtendedInfo; // [asmjit::X86Inst/X86Cond - Globals] // ============================================================================ -#if !defined(ASMJIT_DISABLE_NAMES) +#if !defined(ASMJIT_DISABLE_TEXT) //! \internal //! //! X86/X64 instructions' names, accessible through `X86InstInfo`. 
ASMJIT_VARAPI const char _x86InstName[]; -#endif // !ASMJIT_DISABLE_NAMES +#endif // !ASMJIT_DISABLE_TEXT //! \internal //! @@ -2209,7 +2209,7 @@ struct X86InstInfo { // [Accessors - Instruction Name] // -------------------------------------------------------------------------- -#if !defined(ASMJIT_DISABLE_NAMES) +#if !defined(ASMJIT_DISABLE_TEXT) //! Get instruction name string (null terminated). ASMJIT_INLINE const char* getInstName() const { return _x86InstName + static_cast(_nameIndex); @@ -2219,7 +2219,7 @@ struct X86InstInfo { ASMJIT_INLINE uint32_t _getNameIndex() const { return _nameIndex; } -#endif // !ASMJIT_DISABLE_NAMES +#endif // !ASMJIT_DISABLE_TEXT // -------------------------------------------------------------------------- // [Accessors - Extended-Info] @@ -2335,54 +2335,54 @@ struct X86Util { //! //! \note `instId` has to be valid instruction ID, it can't be greater than //! or equal to `_kX86InstIdCount`. It asserts in debug mode. - static ASMJIT_INLINE const X86InstInfo& getInstInfo(uint32_t instId) { + static ASMJIT_INLINE const X86InstInfo& getInstInfo(uint32_t instId) noexcept { ASMJIT_ASSERT(instId < _kX86InstIdCount); return _x86InstInfo[instId]; } -#if !defined(ASMJIT_DISABLE_NAMES) +#if !defined(ASMJIT_DISABLE_TEXT) //! Get an instruction ID from a given instruction `name`. //! //! If there is an exact match the instruction id is returned, otherwise //! `kInstIdNone` (zero) is returned. //! //! The given `name` doesn't have to be null-terminated if `len` is provided. - ASMJIT_API static uint32_t getInstIdByName(const char* name, size_t len = kInvalidIndex); -#endif // !ASMJIT_DISABLE_NAMES + ASMJIT_API static uint32_t getInstIdByName(const char* name, size_t len = kInvalidIndex) noexcept; +#endif // !ASMJIT_DISABLE_TEXT // -------------------------------------------------------------------------- // [Condition Codes] // -------------------------------------------------------------------------- //! 
Corresponds to transposing the operands of a comparison. - static ASMJIT_INLINE uint32_t reverseCond(uint32_t cond) { + static ASMJIT_INLINE uint32_t reverseCond(uint32_t cond) noexcept { ASMJIT_ASSERT(cond < ASMJIT_ARRAY_SIZE(_x86ReverseCond)); return _x86ReverseCond[cond]; } //! Get the equivalent of negated condition code. - static ASMJIT_INLINE uint32_t negateCond(uint32_t cond) { + static ASMJIT_INLINE uint32_t negateCond(uint32_t cond) noexcept { ASMJIT_ASSERT(cond < ASMJIT_ARRAY_SIZE(_x86ReverseCond)); return cond ^ static_cast(cond < kX86CondNone); } //! Translate condition code `cc` to `cmovcc` instruction code. //! \sa \ref X86InstId, \ref _kX86InstIdCmovcc. - static ASMJIT_INLINE uint32_t condToCmovcc(uint32_t cond) { + static ASMJIT_INLINE uint32_t condToCmovcc(uint32_t cond) noexcept { ASMJIT_ASSERT(static_cast(cond) < ASMJIT_ARRAY_SIZE(_x86CondToCmovcc)); return _x86CondToCmovcc[cond]; } //! Translate condition code `cc` to `jcc` instruction code. //! \sa \ref X86InstId, \ref _kX86InstIdJcc. - static ASMJIT_INLINE uint32_t condToJcc(uint32_t cond) { + static ASMJIT_INLINE uint32_t condToJcc(uint32_t cond) noexcept { ASMJIT_ASSERT(static_cast(cond) < ASMJIT_ARRAY_SIZE(_x86CondToJcc)); return _x86CondToJcc[cond]; } //! Translate condition code `cc` to `setcc` instruction code. //! \sa \ref X86InstId, \ref _kX86InstIdSetcc. - static ASMJIT_INLINE uint32_t condToSetcc(uint32_t cond) { + static ASMJIT_INLINE uint32_t condToSetcc(uint32_t cond) noexcept { ASMJIT_ASSERT(static_cast(cond) < ASMJIT_ARRAY_SIZE(_x86CondToSetcc)); return _x86CondToSetcc[cond]; } @@ -2397,8 +2397,8 @@ struct X86Util { //! \param b Position of the second component [0, 1], inclusive. //! //! Shuffle constants can be used to encode an immediate for these instructions: - //! - `X86Assembler::shufpd()` and `X86Compiler::shufpd()` - static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b) { + //! 
- `shufpd` + static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b) noexcept { ASMJIT_ASSERT(a <= 0x1 && b <= 0x1); uint32_t result = (a << 1) | b; return static_cast(result); @@ -2412,12 +2412,12 @@ struct X86Util { //! \param d Position of the fourth component [0, 3], inclusive. //! //! Shuffle constants can be used to encode an immediate for these instructions: - //! - `X86Assembler::pshufw()` and `X86Compiler::pshufw()`. - //! - `X86Assembler::pshufd()` and `X86Compiler::pshufd()`. - //! - `X86Assembler::pshufhw()` and `X86Compiler::pshufhw()`. - //! - `X86Assembler::pshuflw()` and `X86Compiler::pshuflw()`. - //! - `X86Assembler::shufps()` and `X86Compiler::shufps()`. - static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b, uint32_t c, uint32_t d) { + //! - `pshufw()` + //! - `pshufd()` + //! - `pshuflw()` + //! - `pshufhw()` + //! - `shufps()` + static ASMJIT_INLINE int shuffle(uint32_t a, uint32_t b, uint32_t c, uint32_t d) noexcept { ASMJIT_ASSERT(a <= 0x3 && b <= 0x3 && c <= 0x3 && d <= 0x3); uint32_t result = (a << 6) | (b << 4) | (c << 2) | d; return static_cast(result); diff --git a/src/asmjit/x86/x86operand.cpp b/src/asmjit/x86/x86operand.cpp index 7e300ee..0fcf1c4 100644 --- a/src/asmjit/x86/x86operand.cpp +++ b/src/asmjit/x86/x86operand.cpp @@ -24,17 +24,17 @@ namespace x86 { // [asmjit::X86Mem - abs[]] // ============================================================================ -X86Mem ptr_abs(Ptr pAbs, int32_t disp, uint32_t size) { +X86Mem ptr_abs(Ptr p, int32_t disp, uint32_t size) noexcept { X86Mem m(NoInit); - m._init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeAbsolute, 0, kInvalidValue); + m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, 0, kInvalidValue); m._vmem.index = kInvalidValue; - m._vmem.displacement = static_cast((intptr_t)(pAbs + disp)); + m._vmem.displacement = static_cast((intptr_t)(p + disp)); return m; } -X86Mem ptr_abs(Ptr pAbs, const X86Reg& index, uint32_t shift, int32_t disp, uint32_t 
size) { +X86Mem ptr_abs(Ptr p, const X86Reg& index, uint32_t shift, int32_t disp, uint32_t size) noexcept { X86Mem m(NoInit); uint32_t flags = shift << kX86MemShiftIndex; @@ -45,15 +45,15 @@ X86Mem ptr_abs(Ptr pAbs, const X86Reg& index, uint32_t shift, int32_t disp, uint else if (index.isYmm()) flags |= kX86MemVSibYmm << kX86MemVSibIndex; - m._init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue); + m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue); m._vmem.index = index.getRegIndex(); - m._vmem.displacement = static_cast((intptr_t)(pAbs + disp)); + m._vmem.displacement = static_cast((intptr_t)(p + disp)); return m; } #if !defined(ASMJIT_DISABLE_COMPILER) -X86Mem ptr_abs(Ptr pAbs, const X86Var& index, uint32_t shift, int32_t disp, uint32_t size) { +X86Mem ptr_abs(Ptr p, const X86Var& index, uint32_t shift, int32_t disp, uint32_t size) noexcept { X86Mem m(NoInit); uint32_t flags = shift << kX86MemShiftIndex; @@ -67,9 +67,9 @@ X86Mem ptr_abs(Ptr pAbs, const X86Var& index, uint32_t shift, int32_t disp, uint else if (indexRegType == kX86RegTypeYmm) flags |= kX86MemVSibYmm << kX86MemVSibIndex; - m._init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue); + m._init_packed_op_sz_b0_b1_id(Operand::kTypeMem, size, kMemTypeAbsolute, flags, kInvalidValue); m._vmem.index = index_.getId(); - m._vmem.displacement = static_cast((intptr_t)(pAbs + disp)); + m._vmem.displacement = static_cast((intptr_t)(p + disp)); return m; } diff --git a/src/asmjit/x86/x86operand.h b/src/asmjit/x86/x86operand.h index a3bb0bd..76d32c6 100644 --- a/src/asmjit/x86/x86operand.h +++ b/src/asmjit/x86/x86operand.h @@ -31,8 +31,6 @@ namespace asmjit { // ============================================================================ struct X86Reg; -struct X86RipReg; -struct X86SegReg; struct X86GpReg; struct X86FpReg; struct X86MmReg; @@ -41,6 +39,9 @@ struct X86XmmReg; struct X86YmmReg; struct 
X86ZmmReg; +struct X86SegReg; +struct X86RipReg; + #if !defined(ASMJIT_DISABLE_COMPILER) struct X86Var; struct X86GpVar; @@ -58,7 +59,7 @@ struct X86ZmmVar; // [asmjit::X86RegClass] // ============================================================================ -//! X86/X64 variable class. +//! X86/X64 register class. ASMJIT_ENUM(X86RegClass) { // -------------------------------------------------------------------------- // [Regs & Vars] @@ -250,7 +251,7 @@ ASMJIT_ENUM(X86MemFlags) { }; // ============================================================================ -// [asmjit::k86VarType] +// [asmjit::X86VarType] // ============================================================================ //! X86/X64 variable type. @@ -316,7 +317,7 @@ ASMJIT_ENUM(X86VarType) { //! \internal //! -//! X86/X64 registers count (Gp, Mm, K, Xmm/Ymm/Zmm). +//! X86/X64 registers count. //! //! Since the number of registers changed across CPU generations `X86RegCount` //! class is used by `X86Assembler` and `X86Compiler` to provide a way to get @@ -339,93 +340,86 @@ struct X86RegCount { // -------------------------------------------------------------------------- //! Reset all counters to zero. - ASMJIT_INLINE void reset() { _packed = 0; } + ASMJIT_INLINE void reset() noexcept { _packed = 0; } // -------------------------------------------------------------------------- // [Get] // -------------------------------------------------------------------------- - //! Get register count by `classId`. - ASMJIT_INLINE uint32_t get(uint32_t classId) const { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); - return _regs[classId]; + //! Get register count by a register class `rc`. + ASMJIT_INLINE uint32_t get(uint32_t rc) const noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); + + uint32_t shift = Utils::byteShiftOfDWordStruct(rc); + return (_packed >> shift) & static_cast(0xFF); } - //! Get Gp register count. 
- ASMJIT_INLINE uint32_t getGp() const { return _regs[kX86RegClassGp]; } - //! Get Mm register count. - ASMJIT_INLINE uint32_t getMm() const { return _regs[kX86RegClassMm]; } - //! Get K register count. - ASMJIT_INLINE uint32_t getK() const { return _regs[kX86RegClassK]; } - //! Get Xmm/Ymm/Zmm register count. - ASMJIT_INLINE uint32_t getXyz() const { return _regs[kX86RegClassXyz]; } + //! Get Gp count. + ASMJIT_INLINE uint32_t getGp() const noexcept { return get(kX86RegClassGp); } + //! Get Mm count. + ASMJIT_INLINE uint32_t getMm() const noexcept { return get(kX86RegClassMm); } + //! Get K count. + ASMJIT_INLINE uint32_t getK() const noexcept { return get(kX86RegClassK); } + //! Get Xmm/Ymm/Zmm count. + ASMJIT_INLINE uint32_t getXyz() const noexcept { return get(kX86RegClassXyz); } // -------------------------------------------------------------------------- // [Set] // -------------------------------------------------------------------------- - //! Set register count by `classId`. - ASMJIT_INLINE void set(uint32_t classId, uint32_t n) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + //! Set register count by a register class `rc`. + ASMJIT_INLINE void set(uint32_t rc, uint32_t n) noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); ASMJIT_ASSERT(n <= 0xFF); - _regs[classId] = static_cast(n); + uint32_t shift = Utils::byteShiftOfDWordStruct(rc); + _packed = (_packed & ~static_cast(0xFF << shift)) + (n << shift); } - //! Set Gp register count. - ASMJIT_INLINE void setGp(uint32_t n) { set(kX86RegClassGp, n); } - //! Set Mm register count. - ASMJIT_INLINE void setMm(uint32_t n) { set(kX86RegClassMm, n); } - //! Set K register count. - ASMJIT_INLINE void setK(uint32_t n) { set(kX86RegClassK, n); } - //! Set Xmm/Ymm/Zmm register count. - ASMJIT_INLINE void setXyz(uint32_t n) { set(kX86RegClassXyz, n); } + //! Set Gp count. + ASMJIT_INLINE void setGp(uint32_t n) noexcept { set(kX86RegClassGp, n); } + //! Set Mm count. 
+ ASMJIT_INLINE void setMm(uint32_t n) noexcept { set(kX86RegClassMm, n); } + //! Set K count. + ASMJIT_INLINE void setK(uint32_t n) noexcept { set(kX86RegClassK, n); } + //! Set Xmm/Ymm/Zmm count. + ASMJIT_INLINE void setXyz(uint32_t n) noexcept { set(kX86RegClassXyz, n); } // -------------------------------------------------------------------------- // [Add] // -------------------------------------------------------------------------- - //! Add register count by `classId`. - ASMJIT_INLINE void add(uint32_t classId, uint32_t n = 1) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); - ASMJIT_ASSERT(0xFF - static_cast(_regs[classId]) >= n); + //! Add register count by a register class `rc`. + ASMJIT_INLINE void add(uint32_t rc, uint32_t n = 1) noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); + ASMJIT_ASSERT(0xFF - static_cast(_regs[rc]) >= n); - _regs[classId] += static_cast(n); + uint32_t shift = Utils::byteShiftOfDWordStruct(rc); + _packed += n << shift; } - //! Add Gp register count. - ASMJIT_INLINE void addGp(uint32_t n) { add(kX86RegClassGp, n); } - //! Add Mm register count. - ASMJIT_INLINE void addMm(uint32_t n) { add(kX86RegClassMm, n); } - //! Add K register count. - ASMJIT_INLINE void addK(uint32_t n) { add(kX86RegClassK, n); } - //! Add Xmm/Ymm/Zmm register count. - ASMJIT_INLINE void addXyz(uint32_t n) { add(kX86RegClassXyz, n); } + //! Add Gp count. + ASMJIT_INLINE void addGp(uint32_t n) noexcept { add(kX86RegClassGp, n); } + //! Add Mm count. + ASMJIT_INLINE void addMm(uint32_t n) noexcept { add(kX86RegClassMm, n); } + //! Add K count. + ASMJIT_INLINE void addK(uint32_t n) noexcept { add(kX86RegClassK, n); } + //! Add Xmm/Ymm/Zmm count. + ASMJIT_INLINE void addXyz(uint32_t n) noexcept { add(kX86RegClassXyz, n); } // -------------------------------------------------------------------------- // [Misc] // -------------------------------------------------------------------------- - //! Build a register indexes, based on register's `count`. 
- //! - //! Register index is used by \ref `X86Compiler` in per-instruction register - //! data. Indexes are sorted by register class in Gp, Mm, K, and Xmm/Ymm/Zmm - //! order. - ASMJIT_INLINE void indexFromRegCount(const X86RegCount& count) { - uint32_t x = count._regs[0]; - uint32_t y; + //! Build register indexes based on the given `count` of registers. + ASMJIT_INLINE void indexFromRegCount(const X86RegCount& count) noexcept { + uint32_t x = static_cast(count._regs[0]); + uint32_t y = static_cast(count._regs[1]) + x; + uint32_t z = static_cast(count._regs[2]) + y; - _regs[0] = static_cast(0); - _regs[1] = static_cast(x); - - x = x + count._regs[1]; - y = x + count._regs[2]; - - ASMJIT_ASSERT(x <= 0xFF); ASMJIT_ASSERT(y <= 0xFF); - - _regs[2] = static_cast(x); - _regs[3] = static_cast(y); + ASMJIT_ASSERT(z <= 0xFF); + _packed = Utils::pack32_4x8(0, x, y, z); } // -------------------------------------------------------------------------- @@ -442,8 +436,6 @@ struct X86RegCount { uint8_t _k; //! Count of Xmm/Ymm/Zmm registers. uint8_t _xyz; - //! \internal - uint8_t _reserved[3]; }; uint8_t _regs[4]; @@ -457,14 +449,14 @@ struct X86RegCount { //! \internal //! -//! X86/X64 registers mask (Gp, Mm, K, Xmm/Ymm/Zmm). +//! X86/X64 registers mask. struct X86RegMask { // -------------------------------------------------------------------------- // [Reset] // -------------------------------------------------------------------------- //! Reset all register masks to zero. - ASMJIT_INLINE void reset() { + ASMJIT_INLINE void reset() noexcept { _packed.reset(); } @@ -473,14 +465,14 @@ struct X86RegMask { // -------------------------------------------------------------------------- //! Get whether all register masks are zero (empty). 
- ASMJIT_INLINE bool isEmpty() const { + ASMJIT_INLINE bool isEmpty() const noexcept { return _packed.isZero(); } - ASMJIT_INLINE bool has(uint32_t classId, uint32_t mask = 0xFFFFFFFF) const { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + ASMJIT_INLINE bool has(uint32_t rc, uint32_t mask = 0xFFFFFFFF) const noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); - switch (classId) { + switch (rc) { case kX86RegClassGp : return (static_cast(_gp ) & mask) != 0; case kX86RegClassMm : return (static_cast(_mm ) & mask) != 0; case kX86RegClassK : return (static_cast(_k ) & mask) != 0; @@ -490,19 +482,19 @@ struct X86RegMask { return false; } - ASMJIT_INLINE bool hasGp(uint32_t mask = 0xFFFFFFFF) const { return has(kX86RegClassGp, mask); } - ASMJIT_INLINE bool hasMm(uint32_t mask = 0xFFFFFFFF) const { return has(kX86RegClassMm, mask); } - ASMJIT_INLINE bool hasK(uint32_t mask = 0xFFFFFFFF) const { return has(kX86RegClassK, mask); } - ASMJIT_INLINE bool hasXyz(uint32_t mask = 0xFFFFFFFF) const { return has(kX86RegClassXyz, mask); } + ASMJIT_INLINE bool hasGp(uint32_t mask = 0xFFFFFFFF) const noexcept { return has(kX86RegClassGp, mask); } + ASMJIT_INLINE bool hasMm(uint32_t mask = 0xFFFFFFFF) const noexcept { return has(kX86RegClassMm, mask); } + ASMJIT_INLINE bool hasK(uint32_t mask = 0xFFFFFFFF) const noexcept { return has(kX86RegClassK, mask); } + ASMJIT_INLINE bool hasXyz(uint32_t mask = 0xFFFFFFFF) const noexcept { return has(kX86RegClassXyz, mask); } // -------------------------------------------------------------------------- // [Get] // -------------------------------------------------------------------------- - ASMJIT_INLINE uint32_t get(uint32_t classId) const { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + ASMJIT_INLINE uint32_t get(uint32_t rc) const noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); - switch (classId) { + switch (rc) { case kX86RegClassGp : return _gp; case kX86RegClassMm : return _mm; case kX86RegClassK : 
return _k; @@ -512,19 +504,19 @@ struct X86RegMask { return 0; } - ASMJIT_INLINE uint32_t getGp() const { return get(kX86RegClassGp); } - ASMJIT_INLINE uint32_t getMm() const { return get(kX86RegClassMm); } - ASMJIT_INLINE uint32_t getK() const { return get(kX86RegClassK); } - ASMJIT_INLINE uint32_t getXyz() const { return get(kX86RegClassXyz); } + ASMJIT_INLINE uint32_t getGp() const noexcept { return get(kX86RegClassGp); } + ASMJIT_INLINE uint32_t getMm() const noexcept { return get(kX86RegClassMm); } + ASMJIT_INLINE uint32_t getK() const noexcept { return get(kX86RegClassK); } + ASMJIT_INLINE uint32_t getXyz() const noexcept { return get(kX86RegClassXyz); } // -------------------------------------------------------------------------- // [Zero] // -------------------------------------------------------------------------- - ASMJIT_INLINE void zero(uint32_t classId) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + ASMJIT_INLINE void zero(uint32_t rc) noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); - switch (classId) { + switch (rc) { case kX86RegClassGp : _gp = 0; break; case kX86RegClassMm : _mm = 0; break; case kX86RegClassK : _k = 0; break; @@ -532,23 +524,23 @@ struct X86RegMask { } } - ASMJIT_INLINE void zeroGp() { zero(kX86RegClassGp); } - ASMJIT_INLINE void zeroMm() { zero(kX86RegClassMm); } - ASMJIT_INLINE void zeroK() { zero(kX86RegClassK); } - ASMJIT_INLINE void zeroXyz() { zero(kX86RegClassXyz); } + ASMJIT_INLINE void zeroGp() noexcept { zero(kX86RegClassGp); } + ASMJIT_INLINE void zeroMm() noexcept { zero(kX86RegClassMm); } + ASMJIT_INLINE void zeroK() noexcept { zero(kX86RegClassK); } + ASMJIT_INLINE void zeroXyz() noexcept { zero(kX86RegClassXyz); } // -------------------------------------------------------------------------- // [Set] // -------------------------------------------------------------------------- - ASMJIT_INLINE void set(const X86RegMask& other) { + ASMJIT_INLINE void set(const X86RegMask& other) noexcept { _packed 
= other._packed; } - ASMJIT_INLINE void set(uint32_t classId, uint32_t mask) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + ASMJIT_INLINE void set(uint32_t rc, uint32_t mask) noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); - switch (classId) { + switch (rc) { case kX86RegClassGp : _gp = static_cast(mask); break; case kX86RegClassMm : _mm = static_cast(mask); break; case kX86RegClassK : _k = static_cast(mask); break; @@ -556,23 +548,23 @@ struct X86RegMask { } } - ASMJIT_INLINE void setGp(uint32_t mask) { return set(kX86RegClassGp, mask); } - ASMJIT_INLINE void setMm(uint32_t mask) { return set(kX86RegClassMm, mask); } - ASMJIT_INLINE void setK(uint32_t mask) { return set(kX86RegClassK, mask); } - ASMJIT_INLINE void setXyz(uint32_t mask) { return set(kX86RegClassXyz, mask); } + ASMJIT_INLINE void setGp(uint32_t mask) noexcept { return set(kX86RegClassGp, mask); } + ASMJIT_INLINE void setMm(uint32_t mask) noexcept { return set(kX86RegClassMm, mask); } + ASMJIT_INLINE void setK(uint32_t mask) noexcept { return set(kX86RegClassK, mask); } + ASMJIT_INLINE void setXyz(uint32_t mask) noexcept { return set(kX86RegClassXyz, mask); } // -------------------------------------------------------------------------- // [And] // -------------------------------------------------------------------------- - ASMJIT_INLINE void and_(const X86RegMask& other) { + ASMJIT_INLINE void and_(const X86RegMask& other) noexcept { _packed.and_(other._packed); } - ASMJIT_INLINE void and_(uint32_t classId, uint32_t mask) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + ASMJIT_INLINE void and_(uint32_t rc, uint32_t mask) noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); - switch (classId) { + switch (rc) { case kX86RegClassGp : _gp &= static_cast(mask); break; case kX86RegClassMm : _mm &= static_cast(mask); break; case kX86RegClassK : _k &= static_cast(mask); break; @@ -580,23 +572,23 @@ struct X86RegMask { } } - ASMJIT_INLINE void andGp(uint32_t mask) { 
and_(kX86RegClassGp, mask); } - ASMJIT_INLINE void andMm(uint32_t mask) { and_(kX86RegClassMm, mask); } - ASMJIT_INLINE void andK(uint32_t mask) { and_(kX86RegClassK, mask); } - ASMJIT_INLINE void andXyz(uint32_t mask) { and_(kX86RegClassXyz, mask); } + ASMJIT_INLINE void andGp(uint32_t mask) noexcept { and_(kX86RegClassGp, mask); } + ASMJIT_INLINE void andMm(uint32_t mask) noexcept { and_(kX86RegClassMm, mask); } + ASMJIT_INLINE void andK(uint32_t mask) noexcept { and_(kX86RegClassK, mask); } + ASMJIT_INLINE void andXyz(uint32_t mask) noexcept { and_(kX86RegClassXyz, mask); } // -------------------------------------------------------------------------- // [AndNot] // -------------------------------------------------------------------------- - ASMJIT_INLINE void andNot(const X86RegMask& other) { + ASMJIT_INLINE void andNot(const X86RegMask& other) noexcept { _packed.andNot(other._packed); } - ASMJIT_INLINE void andNot(uint32_t classId, uint32_t mask) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + ASMJIT_INLINE void andNot(uint32_t rc, uint32_t mask) noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); - switch (classId) { + switch (rc) { case kX86RegClassGp : _gp &= ~static_cast(mask); break; case kX86RegClassMm : _mm &= ~static_cast(mask); break; case kX86RegClassK : _k &= ~static_cast(mask); break; @@ -604,22 +596,22 @@ struct X86RegMask { } } - ASMJIT_INLINE void andNotGp(uint32_t mask) { andNot(kX86RegClassGp, mask); } - ASMJIT_INLINE void andNotMm(uint32_t mask) { andNot(kX86RegClassMm, mask); } - ASMJIT_INLINE void andNotK(uint32_t mask) { andNot(kX86RegClassK, mask); } - ASMJIT_INLINE void andNotXyz(uint32_t mask) { andNot(kX86RegClassXyz, mask); } + ASMJIT_INLINE void andNotGp(uint32_t mask) noexcept { andNot(kX86RegClassGp, mask); } + ASMJIT_INLINE void andNotMm(uint32_t mask) noexcept { andNot(kX86RegClassMm, mask); } + ASMJIT_INLINE void andNotK(uint32_t mask) noexcept { andNot(kX86RegClassK, mask); } + ASMJIT_INLINE void 
andNotXyz(uint32_t mask) noexcept { andNot(kX86RegClassXyz, mask); } // -------------------------------------------------------------------------- // [Or] // -------------------------------------------------------------------------- - ASMJIT_INLINE void or_(const X86RegMask& other) { + ASMJIT_INLINE void or_(const X86RegMask& other) noexcept { _packed.or_(other._packed); } - ASMJIT_INLINE void or_(uint32_t classId, uint32_t mask) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); - switch (classId) { + ASMJIT_INLINE void or_(uint32_t rc, uint32_t mask) noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); + switch (rc) { case kX86RegClassGp : _gp |= static_cast(mask); break; case kX86RegClassMm : _mm |= static_cast(mask); break; case kX86RegClassK : _k |= static_cast(mask); break; @@ -627,23 +619,23 @@ struct X86RegMask { } } - ASMJIT_INLINE void orGp(uint32_t mask) { return or_(kX86RegClassGp, mask); } - ASMJIT_INLINE void orMm(uint32_t mask) { return or_(kX86RegClassMm, mask); } - ASMJIT_INLINE void orK(uint32_t mask) { return or_(kX86RegClassK, mask); } - ASMJIT_INLINE void orXyz(uint32_t mask) { return or_(kX86RegClassXyz, mask); } + ASMJIT_INLINE void orGp(uint32_t mask) noexcept { return or_(kX86RegClassGp, mask); } + ASMJIT_INLINE void orMm(uint32_t mask) noexcept { return or_(kX86RegClassMm, mask); } + ASMJIT_INLINE void orK(uint32_t mask) noexcept { return or_(kX86RegClassK, mask); } + ASMJIT_INLINE void orXyz(uint32_t mask) noexcept { return or_(kX86RegClassXyz, mask); } // -------------------------------------------------------------------------- // [Xor] // -------------------------------------------------------------------------- - ASMJIT_INLINE void xor_(const X86RegMask& other) { + ASMJIT_INLINE void xor_(const X86RegMask& other) noexcept { _packed.xor_(other._packed); } - ASMJIT_INLINE void xor_(uint32_t classId, uint32_t mask) { - ASMJIT_ASSERT(classId < _kX86RegClassManagedCount); + ASMJIT_INLINE void xor_(uint32_t rc, uint32_t mask) 
noexcept { + ASMJIT_ASSERT(rc < _kX86RegClassManagedCount); - switch (classId) { + switch (rc) { case kX86RegClassGp : _gp ^= static_cast(mask); break; case kX86RegClassMm : _mm ^= static_cast(mask); break; case kX86RegClassK : _k ^= static_cast(mask); break; @@ -651,10 +643,10 @@ struct X86RegMask { } } - ASMJIT_INLINE void xorGp(uint32_t mask) { xor_(kX86RegClassGp, mask); } - ASMJIT_INLINE void xorMm(uint32_t mask) { xor_(kX86RegClassMm, mask); } - ASMJIT_INLINE void xorK(uint32_t mask) { xor_(kX86RegClassK, mask); } - ASMJIT_INLINE void xorXyz(uint32_t mask) { xor_(kX86RegClassXyz, mask); } + ASMJIT_INLINE void xorGp(uint32_t mask) noexcept { xor_(kX86RegClassGp, mask); } + ASMJIT_INLINE void xorMm(uint32_t mask) noexcept { xor_(kX86RegClassMm, mask); } + ASMJIT_INLINE void xorK(uint32_t mask) noexcept { xor_(kX86RegClassK, mask); } + ASMJIT_INLINE void xorXyz(uint32_t mask) noexcept { xor_(kX86RegClassXyz, mask); } // -------------------------------------------------------------------------- // [Members] @@ -682,7 +674,7 @@ struct X86RegMask { // ============================================================================ // This is only defined by `x86operand_regs.cpp` when exporting registers. -#if defined(ASMJIT_EXPORTS_X86OPERAND_REGS) +#if defined(ASMJIT_EXPORTS_X86_REGS) // Remap all classes to POD structs so they can be statically initialized // without calling a constructor. Compiler will store these in .DATA section. @@ -711,15 +703,15 @@ struct X86Reg : public Reg { // -------------------------------------------------------------------------- //! Create a dummy X86 register. - ASMJIT_INLINE X86Reg() : Reg() {} + ASMJIT_INLINE X86Reg() noexcept : Reg() {} //! Create a reference to `other` X86 register. - ASMJIT_INLINE X86Reg(const X86Reg& other) : Reg(other) {} + ASMJIT_INLINE X86Reg(const X86Reg& other) noexcept : Reg(other) {} //! Create a reference to `other` X86 register and change the index to `index`. 
- ASMJIT_INLINE X86Reg(const X86Reg& other, uint32_t index) : Reg(other, index) {} + ASMJIT_INLINE X86Reg(const X86Reg& other, uint32_t index) noexcept : Reg(other, index) {} //! Create a custom X86 register. - ASMJIT_INLINE X86Reg(uint32_t type, uint32_t index, uint32_t size) : Reg(type, index, size) {} + ASMJIT_INLINE X86Reg(uint32_t type, uint32_t index, uint32_t size) noexcept : Reg(type, index, size) {} //! Create non-initialized X86 register. - explicit ASMJIT_INLINE X86Reg(const _NoInit&) : Reg(NoInit) {} + explicit ASMJIT_INLINE X86Reg(const _NoInit&) noexcept : Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86Reg Specific] @@ -728,50 +720,50 @@ struct X86Reg : public Reg { ASMJIT_REG_OP(X86Reg) //! Get whether the register is Gp register. - ASMJIT_INLINE bool isGp() const { return _vreg.type <= kX86RegTypeGpq; } + ASMJIT_INLINE bool isGp() const noexcept { return _vreg.type <= kX86RegTypeGpq; } //! Get whether the register is Gp byte (8-bit) register. - ASMJIT_INLINE bool isGpb() const { return _vreg.type <= _kX86RegTypePatchedGpbHi; } + ASMJIT_INLINE bool isGpb() const noexcept { return _vreg.type <= _kX86RegTypePatchedGpbHi; } //! Get whether the register is Gp lo-byte (8-bit) register. - ASMJIT_INLINE bool isGpbLo() const { return _vreg.type == kX86RegTypeGpbLo; } + ASMJIT_INLINE bool isGpbLo() const noexcept { return _vreg.type == kX86RegTypeGpbLo; } //! Get whether the register is Gp hi-byte (8-bit) register. - ASMJIT_INLINE bool isGpbHi() const { return _vreg.type == kX86RegTypeGpbHi; } + ASMJIT_INLINE bool isGpbHi() const noexcept { return _vreg.type == kX86RegTypeGpbHi; } //! Get whether the register is Gp word (16-bit) register. - ASMJIT_INLINE bool isGpw() const { return _vreg.type == kX86RegTypeGpw; } + ASMJIT_INLINE bool isGpw() const noexcept { return _vreg.type == kX86RegTypeGpw; } //! Get whether the register is Gp dword (32-bit) register. 
- ASMJIT_INLINE bool isGpd() const { return _vreg.type == kX86RegTypeGpd; } + ASMJIT_INLINE bool isGpd() const noexcept { return _vreg.type == kX86RegTypeGpd; } //! Get whether the register is Gp qword (64-bit) register. - ASMJIT_INLINE bool isGpq() const { return _vreg.type == kX86RegTypeGpq; } + ASMJIT_INLINE bool isGpq() const noexcept { return _vreg.type == kX86RegTypeGpq; } //! Get whether the register is Fp register. - ASMJIT_INLINE bool isFp() const { return _vreg.type == kX86RegTypeFp; } + ASMJIT_INLINE bool isFp() const noexcept { return _vreg.type == kX86RegTypeFp; } //! Get whether the register is Mm (64-bit) register. - ASMJIT_INLINE bool isMm() const { return _vreg.type == kX86RegTypeMm; } + ASMJIT_INLINE bool isMm() const noexcept { return _vreg.type == kX86RegTypeMm; } //! Get whether the register is K (64-bit) register. - ASMJIT_INLINE bool isK() const { return _vreg.type == kX86RegTypeK; } + ASMJIT_INLINE bool isK() const noexcept { return _vreg.type == kX86RegTypeK; } //! Get whether the register is Xmm (128-bit) register. - ASMJIT_INLINE bool isXmm() const { return _vreg.type == kX86RegTypeXmm; } + ASMJIT_INLINE bool isXmm() const noexcept { return _vreg.type == kX86RegTypeXmm; } //! Get whether the register is Ymm (256-bit) register. - ASMJIT_INLINE bool isYmm() const { return _vreg.type == kX86RegTypeYmm; } + ASMJIT_INLINE bool isYmm() const noexcept { return _vreg.type == kX86RegTypeYmm; } //! Get whether the register is Zmm (512-bit) register. - ASMJIT_INLINE bool isZmm() const { return _vreg.type == kX86RegTypeZmm; } + ASMJIT_INLINE bool isZmm() const noexcept { return _vreg.type == kX86RegTypeZmm; } //! Get whether the register is RIP. - ASMJIT_INLINE bool isRip() const { return _vreg.type == kX86RegTypeRip; } + ASMJIT_INLINE bool isRip() const noexcept { return _vreg.type == kX86RegTypeRip; } //! Get whether the register is Segment. 
- ASMJIT_INLINE bool isSeg() const { return _vreg.type == kX86RegTypeSeg; } + ASMJIT_INLINE bool isSeg() const noexcept { return _vreg.type == kX86RegTypeSeg; } // -------------------------------------------------------------------------- // [Statics] // -------------------------------------------------------------------------- //! Get whether the `op` operand is Gpb-Lo or Gpb-Hi register. - static ASMJIT_INLINE bool isGpbReg(const Operand& op) { + static ASMJIT_INLINE bool isGpbReg(const Operand& op) noexcept { const uint32_t mask = Utils::pack32_2x8_1x16( 0xFF, 0xFF, ~(_kX86RegTypePatchedGpbHi << 8) & 0xFF00); - return (op._packed[0].u32[0] & mask) == Utils::pack32_2x8_1x16(kOperandTypeReg, 1, 0x0000); + return (op._packed[0].u32[0] & mask) == Utils::pack32_2x8_1x16(kTypeReg, 1, 0x0000); } }; @@ -786,11 +778,11 @@ struct X86RipReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a RIP register. - ASMJIT_INLINE X86RipReg() : X86Reg(kX86RegTypeRip, 0, 0) {} + ASMJIT_INLINE X86RipReg() noexcept : X86Reg(kX86RegTypeRip, 0, 0) {} //! Create a reference to `other` RIP register. - ASMJIT_INLINE X86RipReg(const X86RipReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86RipReg(const X86RipReg& other) noexcept : X86Reg(other) {} //! Create non-initialized RIP register. - explicit ASMJIT_INLINE X86RipReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86RipReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86RipReg Specific] @@ -810,15 +802,15 @@ struct X86SegReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy segment register. - ASMJIT_INLINE X86SegReg() : X86Reg() {} + ASMJIT_INLINE X86SegReg() noexcept : X86Reg() {} //! Create a reference to `other` segment register. 
- ASMJIT_INLINE X86SegReg(const X86SegReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86SegReg(const X86SegReg& other) noexcept : X86Reg(other) {} //! Create a reference to `other` segment register and change the index to `index`. - ASMJIT_INLINE X86SegReg(const X86SegReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86SegReg(const X86SegReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom segment register. - ASMJIT_INLINE X86SegReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86SegReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized segment register. - explicit ASMJIT_INLINE X86SegReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86SegReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86SegReg Specific] @@ -838,15 +830,15 @@ struct X86GpReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy Gp register. - ASMJIT_INLINE X86GpReg() : X86Reg() {} + ASMJIT_INLINE X86GpReg() noexcept : X86Reg() {} //! Create a reference to `other` Gp register. - ASMJIT_INLINE X86GpReg(const X86GpReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86GpReg(const X86GpReg& other) noexcept : X86Reg(other) {} //! Create a reference to `other` Gp register and change the index to `index`. - ASMJIT_INLINE X86GpReg(const X86GpReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86GpReg(const X86GpReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom Gp register. - ASMJIT_INLINE X86GpReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86GpReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized Gp register. 
- explicit ASMJIT_INLINE X86GpReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86GpReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86GpReg Specific] @@ -863,23 +855,23 @@ struct X86GpReg : public X86Reg { //! This function has been designed to help with maintaining code that runs //! in both 32-bit and 64-bit modes. If you have registers that have mixed //! types, use `X86GpReg::as()` to cast one type to another. - ASMJIT_INLINE X86GpReg as(const X86GpReg& other) const { + ASMJIT_INLINE X86GpReg as(const X86GpReg& other) const noexcept { return X86GpReg(other.getRegType(), getRegIndex(), other.getSize()); } //! Cast this register to 8-bit (LO) part. - ASMJIT_INLINE X86GpReg r8() const { return X86GpReg(kX86RegTypeGpbLo, getRegIndex(), 1); } + ASMJIT_INLINE X86GpReg r8() const noexcept { return X86GpReg(kX86RegTypeGpbLo, getRegIndex(), 1); } //! Cast this register to 8-bit (LO) part. - ASMJIT_INLINE X86GpReg r8Lo() const { return X86GpReg(kX86RegTypeGpbLo, getRegIndex(), 1); } + ASMJIT_INLINE X86GpReg r8Lo() const noexcept { return X86GpReg(kX86RegTypeGpbLo, getRegIndex(), 1); } //! Cast this register to 8-bit (HI) part. - ASMJIT_INLINE X86GpReg r8Hi() const { return X86GpReg(kX86RegTypeGpbHi, getRegIndex(), 1); } + ASMJIT_INLINE X86GpReg r8Hi() const noexcept { return X86GpReg(kX86RegTypeGpbHi, getRegIndex(), 1); } //! Cast this register to 16-bit. - ASMJIT_INLINE X86GpReg r16() const { return X86GpReg(kX86RegTypeGpw, getRegIndex(), 2); } + ASMJIT_INLINE X86GpReg r16() const noexcept { return X86GpReg(kX86RegTypeGpw, getRegIndex(), 2); } //! Cast this register to 32-bit. - ASMJIT_INLINE X86GpReg r32() const { return X86GpReg(kX86RegTypeGpd, getRegIndex(), 4); } + ASMJIT_INLINE X86GpReg r32() const noexcept { return X86GpReg(kX86RegTypeGpd, getRegIndex(), 4); } //! Cast this register to 64-bit. 
- ASMJIT_INLINE X86GpReg r64() const { return X86GpReg(kX86RegTypeGpq, getRegIndex(), 8); } + ASMJIT_INLINE X86GpReg r64() const noexcept { return X86GpReg(kX86RegTypeGpq, getRegIndex(), 8); } }; // ============================================================================ @@ -893,15 +885,15 @@ struct X86FpReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy Fp register. - ASMJIT_INLINE X86FpReg() : X86Reg() {} + ASMJIT_INLINE X86FpReg() noexcept : X86Reg() {} //! Create a reference to `other` Fp register. - ASMJIT_INLINE X86FpReg(const X86FpReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86FpReg(const X86FpReg& other) noexcept : X86Reg(other) {} //! Create a reference to `other` Fp register and change the index to `index`. - ASMJIT_INLINE X86FpReg(const X86FpReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86FpReg(const X86FpReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom Fp register. - ASMJIT_INLINE X86FpReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86FpReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized Fp register. - explicit ASMJIT_INLINE X86FpReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86FpReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86FpReg Specific] @@ -946,15 +938,15 @@ struct X86MmReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy Mm register. - ASMJIT_INLINE X86MmReg() : X86Reg() {} + ASMJIT_INLINE X86MmReg() noexcept : X86Reg() {} //! Create a reference to `other` Mm register. - ASMJIT_INLINE X86MmReg(const X86MmReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86MmReg(const X86MmReg& other) noexcept : X86Reg(other) {} //! 
Create a reference to `other` Mm register and change the index to `index`. - ASMJIT_INLINE X86MmReg(const X86MmReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86MmReg(const X86MmReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom Mm register. - ASMJIT_INLINE X86MmReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86MmReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized Mm register. - explicit ASMJIT_INLINE X86MmReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86MmReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86MmReg Specific] @@ -974,15 +966,15 @@ struct X86KReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy K register. - ASMJIT_INLINE X86KReg() : X86Reg() {} + ASMJIT_INLINE X86KReg() noexcept : X86Reg() {} //! Create a reference to `other` K register. - ASMJIT_INLINE X86KReg(const X86KReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86KReg(const X86KReg& other) noexcept : X86Reg(other) {} //! Create a reference to `other` K register and change the index to `index`. - ASMJIT_INLINE X86KReg(const X86KReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86KReg(const X86KReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom K register. - ASMJIT_INLINE X86KReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86KReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized K register. 
- explicit ASMJIT_INLINE X86KReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86KReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86KReg Specific] @@ -1055,15 +1047,15 @@ struct X86XmmReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy Xmm register. - ASMJIT_INLINE X86XmmReg() : X86Reg() {} + ASMJIT_INLINE X86XmmReg() noexcept : X86Reg() {} //! Create a reference to `other` Xmm register. - ASMJIT_INLINE X86XmmReg(const X86XmmReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86XmmReg(const X86XmmReg& other) noexcept : X86Reg(other) {} //! Create a reference to `other` Xmm register and change the index to `index`. - ASMJIT_INLINE X86XmmReg(const X86XmmReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86XmmReg(const X86XmmReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom Xmm register. - ASMJIT_INLINE X86XmmReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86XmmReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized Xmm register. - explicit ASMJIT_INLINE X86XmmReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86XmmReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86XmmReg Specific] @@ -1076,11 +1068,11 @@ struct X86XmmReg : public X86Reg { // -------------------------------------------------------------------------- //! Cast this register to Xmm (clone). - ASMJIT_INLINE X86XmmReg xmm() const { return X86XmmReg(kX86RegTypeXmm, getRegIndex(), 16); } + ASMJIT_INLINE X86XmmReg xmm() const noexcept { return X86XmmReg(kX86RegTypeXmm, getRegIndex(), 16); } //! Cast this register to Ymm. 
- ASMJIT_INLINE X86YmmReg ymm() const; + ASMJIT_INLINE X86YmmReg ymm() const noexcept; //! Cast this register to Zmm. - ASMJIT_INLINE X86ZmmReg zmm() const; + ASMJIT_INLINE X86ZmmReg zmm() const noexcept; }; // ============================================================================ @@ -1116,15 +1108,15 @@ struct X86YmmReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy Ymm register. - ASMJIT_INLINE X86YmmReg() : X86Reg() {} + ASMJIT_INLINE X86YmmReg() noexcept : X86Reg() {} //! Create a reference to `other` Ymm register. - ASMJIT_INLINE X86YmmReg(const X86YmmReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86YmmReg(const X86YmmReg& other) noexcept : X86Reg(other) {} //! Create a reference to `other` Ymm register and change the index to `index`. - ASMJIT_INLINE X86YmmReg(const X86YmmReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86YmmReg(const X86YmmReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom Ymm register. - ASMJIT_INLINE X86YmmReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86YmmReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized Ymm register. - explicit ASMJIT_INLINE X86YmmReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86YmmReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86YmmReg Specific] @@ -1137,14 +1129,14 @@ struct X86YmmReg : public X86Reg { // -------------------------------------------------------------------------- //! Cast this register to Xmm. - ASMJIT_INLINE X86XmmReg xmm() const { return X86XmmReg(kX86RegTypeXmm, getRegIndex(), 16); } + ASMJIT_INLINE X86XmmReg xmm() const noexcept{ return X86XmmReg(kX86RegTypeXmm, getRegIndex(), 16); } //! Cast this register to Ymm (clone). 
- ASMJIT_INLINE X86YmmReg ymm() const { return X86YmmReg(kX86RegTypeYmm, getRegIndex(), 32); } + ASMJIT_INLINE X86YmmReg ymm() const noexcept { return X86YmmReg(kX86RegTypeYmm, getRegIndex(), 32); } //! Cast this register to Zmm. - ASMJIT_INLINE X86ZmmReg zmm() const; + ASMJIT_INLINE X86ZmmReg zmm() const noexcept; }; -ASMJIT_INLINE X86YmmReg X86XmmReg::ymm() const { return X86YmmReg(kX86RegTypeYmm, getRegIndex(), 32); } +ASMJIT_INLINE X86YmmReg X86XmmReg::ymm() const noexcept { return X86YmmReg(kX86RegTypeYmm, getRegIndex(), 32); } // ============================================================================ // [asmjit::X86ZmmReg] @@ -1157,15 +1149,15 @@ struct X86ZmmReg : public X86Reg { // -------------------------------------------------------------------------- //! Create a dummy Zmm register. - ASMJIT_INLINE X86ZmmReg() : X86Reg() {} + ASMJIT_INLINE X86ZmmReg() noexcept : X86Reg() {} //! Create a reference to `other` Zmm register. - ASMJIT_INLINE X86ZmmReg(const X86ZmmReg& other) : X86Reg(other) {} + ASMJIT_INLINE X86ZmmReg(const X86ZmmReg& other) noexcept : X86Reg(other) {} //! Create a reference to `other` Zmm register and change the index to `index`. - ASMJIT_INLINE X86ZmmReg(const X86ZmmReg& other, uint32_t index) : X86Reg(other, index) {} + ASMJIT_INLINE X86ZmmReg(const X86ZmmReg& other, uint32_t index) noexcept : X86Reg(other, index) {} //! Create a custom Zmm register. - ASMJIT_INLINE X86ZmmReg(uint32_t type, uint32_t index, uint32_t size) : X86Reg(type, index, size) {} + ASMJIT_INLINE X86ZmmReg(uint32_t type, uint32_t index, uint32_t size) noexcept : X86Reg(type, index, size) {} //! Create non-initialized Zmm register. 
- explicit ASMJIT_INLINE X86ZmmReg(const _NoInit&) : X86Reg(NoInit) {} + explicit ASMJIT_INLINE X86ZmmReg(const _NoInit&) noexcept : X86Reg(NoInit) {} // -------------------------------------------------------------------------- // [X86ZmmReg Specific] @@ -1178,15 +1170,15 @@ struct X86ZmmReg : public X86Reg { // -------------------------------------------------------------------------- //! Cast this register to Xmm. - ASMJIT_INLINE X86XmmReg xmm() const { return X86XmmReg(kX86RegTypeXmm, getRegIndex(), 16); } + ASMJIT_INLINE X86XmmReg xmm() const noexcept { return X86XmmReg(kX86RegTypeXmm, getRegIndex(), 16); } //! Cast this register to Ymm. - ASMJIT_INLINE X86YmmReg ymm() const { return X86YmmReg(kX86RegTypeYmm, getRegIndex(), 32); } + ASMJIT_INLINE X86YmmReg ymm() const noexcept { return X86YmmReg(kX86RegTypeYmm, getRegIndex(), 32); } //! Cast this register to Zmm (clone). - ASMJIT_INLINE X86ZmmReg zmm() const { return X86ZmmReg(kX86RegTypeZmm, getRegIndex(), 64); } + ASMJIT_INLINE X86ZmmReg zmm() const noexcept { return X86ZmmReg(kX86RegTypeZmm, getRegIndex(), 64); } }; -ASMJIT_INLINE X86ZmmReg X86XmmReg::zmm() const { return X86ZmmReg(kX86RegTypeZmm, getRegIndex(), 64); } -ASMJIT_INLINE X86ZmmReg X86YmmReg::zmm() const { return X86ZmmReg(kX86RegTypeZmm, getRegIndex(), 64); } +ASMJIT_INLINE X86ZmmReg X86XmmReg::zmm() const noexcept { return X86ZmmReg(kX86RegTypeZmm, getRegIndex(), 64); } +ASMJIT_INLINE X86ZmmReg X86YmmReg::zmm() const noexcept { return X86ZmmReg(kX86RegTypeZmm, getRegIndex(), 64); } // ============================================================================ // [asmjit::X86Mem] @@ -1198,19 +1190,19 @@ struct X86Mem : public BaseMem { // [Construction / Destruction] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86Mem() : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem() noexcept : BaseMem(NoInit) { reset(); } - ASMJIT_INLINE X86Mem(const Label& label, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) 
{ - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeLabel, 0, label._base.id); + ASMJIT_INLINE X86Mem(const Label& label, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeLabel, 0, label._base.id); _init_packed_d2_d3(kInvalidValue, disp); } - ASMJIT_INLINE X86Mem(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeLabel, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeLabel, (kX86MemVSibGpz << kX86MemVSibIndex) + (shift << kX86MemShiftIndex), label.getId()); @@ -1218,34 +1210,34 @@ struct X86Mem : public BaseMem { _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const X86RipReg& rip, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const X86RipReg& rip, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_UNUSED(rip); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeRip, 0, kInvalidValue); + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeRip, 0, kInvalidValue); _init_packed_d2_d3(kInvalidValue, disp); } - ASMJIT_INLINE X86Mem(const X86GpReg& base, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + ASMJIT_INLINE X86Mem(const X86GpReg& base, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(base) + (kX86MemVSibGpz << kX86MemVSibIndex), base.getRegIndex()); _init_packed_d2_d3(kInvalidValue, disp); } - ASMJIT_INLINE X86Mem(const X86GpReg& base, const X86GpReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const X86GpReg& 
base, const X86GpReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(base) + (shift << kX86MemShiftIndex), base.getRegIndex()); _vmem.index = index.getRegIndex(); _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const X86GpReg& base, const X86XmmReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const X86GpReg& base, const X86XmmReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(base) + (kX86MemVSibXmm << kX86MemVSibIndex) + (shift << kX86MemShiftIndex), @@ -1254,10 +1246,10 @@ struct X86Mem : public BaseMem { _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const X86GpReg& base, const X86YmmReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const X86GpReg& base, const X86YmmReg& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(base) + (kX86MemVSibYmm << kX86MemVSibIndex) + (shift << kX86MemShiftIndex), @@ -1267,10 +1259,10 @@ struct X86Mem : public BaseMem { } #if !defined(ASMJIT_DISABLE_COMPILER) - ASMJIT_INLINE X86Mem(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - 
_init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeLabel, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeLabel, (kX86MemVSibGpz << kX86MemVSibIndex) + (shift << kX86MemShiftIndex), label.getId()); @@ -1278,18 +1270,18 @@ struct X86Mem : public BaseMem { _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const X86GpVar& base, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + ASMJIT_INLINE X86Mem(const X86GpVar& base, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(reinterpret_cast(base)) + (kX86MemVSibGpz << kX86MemVSibIndex), ASMJIT_OP_ID(base)); _init_packed_d2_d3(kInvalidValue, disp); } - ASMJIT_INLINE X86Mem(const X86GpVar& base, const X86GpVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const X86GpVar& base, const X86GpVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(reinterpret_cast(base)) + (shift << kX86MemShiftIndex), ASMJIT_OP_ID(base)); @@ -1297,10 +1289,10 @@ struct X86Mem : public BaseMem { _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const X86GpVar& base, const X86XmmVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const X86GpVar& base, const X86XmmVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(reinterpret_cast(base)) + (kX86MemVSibXmm << kX86MemVSibIndex) + (shift << kX86MemShiftIndex), @@ -1309,10 +1301,10 @@ struct X86Mem 
: public BaseMem { _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const X86GpVar& base, const X86YmmVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const X86GpVar& base, const X86YmmVar& index, uint32_t shift, int32_t disp, uint32_t size = 0) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, kMemTypeBaseIndex, + _init_packed_op_sz_b0_b1_id(kTypeMem, size, kMemTypeBaseIndex, _getGpdFlags(reinterpret_cast(base)) + (kX86MemVSibYmm << kX86MemVSibIndex) + (shift << kX86MemShiftIndex), @@ -1321,42 +1313,42 @@ struct X86Mem : public BaseMem { _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const _Init&, uint32_t memType, const X86Var& base, int32_t disp, uint32_t size) : BaseMem(NoInit) { - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, memType, 0, ASMJIT_OP_ID(base)); + ASMJIT_INLINE X86Mem(const _Init&, uint32_t memType, const X86Var& base, int32_t disp, uint32_t size) noexcept : BaseMem(NoInit) { + _init_packed_op_sz_b0_b1_id(kTypeMem, size, memType, 0, ASMJIT_OP_ID(base)); _vmem.index = kInvalidValue; _vmem.displacement = disp; } - ASMJIT_INLINE X86Mem(const _Init&, uint32_t memType, const X86Var& base, const X86GpVar& index, uint32_t shift, int32_t disp, uint32_t size) : BaseMem(NoInit) { + ASMJIT_INLINE X86Mem(const _Init&, uint32_t memType, const X86Var& base, const X86GpVar& index, uint32_t shift, int32_t disp, uint32_t size) noexcept : BaseMem(NoInit) { ASMJIT_ASSERT(shift <= 3); - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, memType, shift << kX86MemShiftIndex, ASMJIT_OP_ID(base)); + _init_packed_op_sz_b0_b1_id(kTypeMem, size, memType, shift << kX86MemShiftIndex, ASMJIT_OP_ID(base)); _vmem.index = ASMJIT_OP_ID(index); _vmem.displacement = disp; } #endif // !ASMJIT_DISABLE_COMPILER - ASMJIT_INLINE X86Mem(const X86Mem& other) : BaseMem(other) {} - explicit ASMJIT_INLINE X86Mem(const _NoInit&) : BaseMem(NoInit) {} + ASMJIT_INLINE 
X86Mem(const X86Mem& other) noexcept : BaseMem(other) {} + explicit ASMJIT_INLINE X86Mem(const _NoInit&) noexcept : BaseMem(NoInit) {} // -------------------------------------------------------------------------- // [X86Mem Specific] // -------------------------------------------------------------------------- //! Clone X86Mem operand. - ASMJIT_INLINE X86Mem clone() const { + ASMJIT_INLINE X86Mem clone() const noexcept { return X86Mem(*this); } //! Reset X86Mem operand. - ASMJIT_INLINE void reset() { - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, 0, kMemTypeBaseIndex, 0, kInvalidValue); + ASMJIT_INLINE void reset() noexcept { + _init_packed_op_sz_b0_b1_id(kTypeMem, 0, kMemTypeBaseIndex, 0, kInvalidValue); _init_packed_d2_d3(kInvalidValue, 0); } //! \internal - ASMJIT_INLINE void _init(uint32_t memType, uint32_t base, int32_t disp, uint32_t size) { - _init_packed_op_sz_b0_b1_id(kOperandTypeMem, size, memType, 0, base); + ASMJIT_INLINE void _init(uint32_t memType, uint32_t base, int32_t disp, uint32_t size) noexcept { + _init_packed_op_sz_b0_b1_id(kTypeMem, size, memType, 0, base); _vmem.index = kInvalidValue; _vmem.displacement = disp; } @@ -1366,24 +1358,24 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get whether the memory operand has segment override prefix. - ASMJIT_INLINE bool hasSegment() const { + ASMJIT_INLINE bool hasSegment() const noexcept { return (_vmem.flags & kX86MemSegMask) != (kX86SegDefault << kX86MemSegIndex); } //! Get memory operand segment, see `X86Seg`. - ASMJIT_INLINE uint32_t getSegment() const { + ASMJIT_INLINE uint32_t getSegment() const noexcept{ return (static_cast(_vmem.flags) >> kX86MemSegIndex) & kX86MemSegBits; } //! Set memory operand segment, see `X86Seg`. 
- ASMJIT_INLINE X86Mem& setSegment(uint32_t segIndex) { + ASMJIT_INLINE X86Mem& setSegment(uint32_t segIndex) noexcept { _vmem.flags = static_cast( (static_cast(_vmem.flags) & kX86MemSegMask) + (segIndex << kX86MemSegIndex)); return *this; } //! Set memory operand segment, see `X86Seg`. - ASMJIT_INLINE X86Mem& setSegment(const X86SegReg& seg) { + ASMJIT_INLINE X86Mem& setSegment(const X86SegReg& seg) noexcept { return setSegment(seg.getRegIndex()); } @@ -1392,18 +1384,18 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get whether the memory operand has 32-bit GP base. - ASMJIT_INLINE bool hasGpdBase() const { + ASMJIT_INLINE bool hasGpdBase() const noexcept { return (_packed[0].u32[0] & Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask)) != 0; } //! Set whether the memory operand has 32-bit GP base. - ASMJIT_INLINE X86Mem& setGpdBase() { + ASMJIT_INLINE X86Mem& setGpdBase() noexcept { _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask); return *this; } //! Set whether the memory operand has 32-bit GP base to `b`. - ASMJIT_INLINE X86Mem& setGpdBase(uint32_t b) { + ASMJIT_INLINE X86Mem& setGpdBase(uint32_t b) noexcept { _packed[0].u32[0] &=~Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemGpdMask); _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, b << kX86MemGpdIndex); return *this; @@ -1414,12 +1406,12 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get V-SIB type. - ASMJIT_INLINE uint32_t getVSib() const { + ASMJIT_INLINE uint32_t getVSib() const noexcept { return (static_cast(_vmem.flags) >> kX86MemVSibIndex) & kX86MemVSibBits; } //! Set V-SIB type. 
- ASMJIT_INLINE X86Mem& _setVSib(uint32_t vsib) { + ASMJIT_INLINE X86Mem& _setVSib(uint32_t vsib) noexcept { _packed[0].u32[0] &=~Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemVSibMask); _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, vsib << kX86MemVSibIndex); return *this; @@ -1430,7 +1422,7 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Set memory operand size. - ASMJIT_INLINE X86Mem& setSize(uint32_t size) { + ASMJIT_INLINE X86Mem& setSize(uint32_t size) noexcept { _vmem.size = static_cast(size); return *this; } @@ -1440,17 +1432,17 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get whether the memory operand has base register. - ASMJIT_INLINE bool hasBase() const { + ASMJIT_INLINE bool hasBase() const noexcept { return _vmem.base != kInvalidValue; } //! Get memory operand base register code, variable id, or `kInvalidValue`. - ASMJIT_INLINE uint32_t getBase() const { + ASMJIT_INLINE uint32_t getBase() const noexcept { return _vmem.base; } //! Set memory operand base register code, variable id, or `kInvalidValue`. - ASMJIT_INLINE X86Mem& setBase(uint32_t base) { + ASMJIT_INLINE X86Mem& setBase(uint32_t base) noexcept { _vmem.base = base; return *this; } @@ -1460,98 +1452,98 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get whether the memory operand has index. - ASMJIT_INLINE bool hasIndex() const { + ASMJIT_INLINE bool hasIndex() const noexcept { return _vmem.index != kInvalidValue; } //! Get memory operand index register code, variable id, or `kInvalidValue`. - ASMJIT_INLINE uint32_t getIndex() const { + ASMJIT_INLINE uint32_t getIndex() const noexcept { return _vmem.index; } //! Set memory operand index register code, variable id, or `kInvalidValue`. 
- ASMJIT_INLINE X86Mem& setIndex(uint32_t index) { + ASMJIT_INLINE X86Mem& setIndex(uint32_t index) noexcept { _vmem.index = index; return *this; } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86GpReg& index) { + ASMJIT_INLINE X86Mem& setIndex(const X86GpReg& index) noexcept { _vmem.index = index.getRegIndex(); return _setVSib(kX86MemVSibGpz); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86GpReg& index, uint32_t shift) { + ASMJIT_INLINE X86Mem& setIndex(const X86GpReg& index, uint32_t shift) noexcept { _vmem.index = index.getRegIndex(); return _setVSib(kX86MemVSibGpz).setShift(shift); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86XmmReg& index) { + ASMJIT_INLINE X86Mem& setIndex(const X86XmmReg& index) noexcept { _vmem.index = index.getRegIndex(); return _setVSib(kX86MemVSibXmm); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86XmmReg& index, uint32_t shift) { + ASMJIT_INLINE X86Mem& setIndex(const X86XmmReg& index, uint32_t shift) noexcept { _vmem.index = index.getRegIndex(); return _setVSib(kX86MemVSibXmm).setShift(shift); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86YmmReg& index) { + ASMJIT_INLINE X86Mem& setIndex(const X86YmmReg& index) noexcept { _vmem.index = index.getRegIndex(); return _setVSib(kX86MemVSibYmm); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86YmmReg& index, uint32_t shift) { + ASMJIT_INLINE X86Mem& setIndex(const X86YmmReg& index, uint32_t shift) noexcept { _vmem.index = index.getRegIndex(); return _setVSib(kX86MemVSibYmm).setShift(shift); } #if !defined(ASMJIT_DISABLE_COMPILER) //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86GpVar& index) { + ASMJIT_INLINE X86Mem& setIndex(const X86GpVar& index) noexcept { _vmem.index = ASMJIT_OP_ID(index); return _setVSib(kX86MemVSibGpz); } //! Set memory index. 
- ASMJIT_INLINE X86Mem& setIndex(const X86GpVar& index, uint32_t shift) { + ASMJIT_INLINE X86Mem& setIndex(const X86GpVar& index, uint32_t shift) noexcept { _vmem.index = ASMJIT_OP_ID(index); return _setVSib(kX86MemVSibGpz).setShift(shift); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86XmmVar& index) { + ASMJIT_INLINE X86Mem& setIndex(const X86XmmVar& index) noexcept { _vmem.index = ASMJIT_OP_ID(index); return _setVSib(kX86MemVSibXmm); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86XmmVar& index, uint32_t shift) { + ASMJIT_INLINE X86Mem& setIndex(const X86XmmVar& index, uint32_t shift) noexcept { _vmem.index = ASMJIT_OP_ID(index); return _setVSib(kX86MemVSibXmm).setShift(shift); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86YmmVar& index) { + ASMJIT_INLINE X86Mem& setIndex(const X86YmmVar& index) noexcept { _vmem.index = ASMJIT_OP_ID(index); return _setVSib(kX86MemVSibYmm); } //! Set memory index. - ASMJIT_INLINE X86Mem& setIndex(const X86YmmVar& index, uint32_t shift) { + ASMJIT_INLINE X86Mem& setIndex(const X86YmmVar& index, uint32_t shift) noexcept { _vmem.index = ASMJIT_OP_ID(index); return _setVSib(kX86MemVSibYmm).setShift(shift); } #endif // !ASMJIT_DISABLE_COMPILER //! Reset memory index. - ASMJIT_INLINE X86Mem& resetIndex() { + ASMJIT_INLINE X86Mem& resetIndex() noexcept { _vmem.index = kInvalidValue; return _setVSib(kX86MemVSibGpz); } @@ -1561,12 +1553,12 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get whether the memory operand has base and index register. - ASMJIT_INLINE bool hasBaseOrIndex() const { + ASMJIT_INLINE bool hasBaseOrIndex() const noexcept { return _vmem.base != kInvalidValue || _vmem.index != kInvalidValue; } //! Get whether the memory operand has base and index register. 
- ASMJIT_INLINE bool hasBaseAndIndex() const { + ASMJIT_INLINE bool hasBaseAndIndex() const noexcept { return _vmem.base != kInvalidValue && _vmem.index != kInvalidValue; } @@ -1575,17 +1567,17 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get whether the memory operand has shift used. - ASMJIT_INLINE bool hasShift() const { + ASMJIT_INLINE bool hasShift() const noexcept { return (_vmem.flags & kX86MemShiftMask) != 0; } - //! Get memory operand index scale (0, 1, 2 or 3). - ASMJIT_INLINE uint32_t getShift() const { + //! Get memory operand index shift (0, 1, 2 or 3). + ASMJIT_INLINE uint32_t getShift() const noexcept { return _vmem.flags >> kX86MemShiftIndex; } - //! Set memory operand index scale (0, 1, 2 or 3). - ASMJIT_INLINE X86Mem& setShift(uint32_t shift) { + //! Set memory operand index shift (0, 1, 2 or 3). + ASMJIT_INLINE X86Mem& setShift(uint32_t shift) noexcept { _packed[0].u32[0] &=~Utils::pack32_4x8(0x00, 0x00, 0x00, kX86MemShiftMask); _packed[0].u32[0] |= Utils::pack32_4x8(0x00, 0x00, 0x00, shift << kX86MemShiftIndex); return *this; @@ -1596,30 +1588,30 @@ struct X86Mem : public BaseMem { // -------------------------------------------------------------------------- //! Get memory operand relative displacement. - ASMJIT_INLINE int32_t getDisplacement() const { + ASMJIT_INLINE int32_t getDisplacement() const noexcept { return _vmem.displacement; } //! Set memory operand relative displacement. - ASMJIT_INLINE X86Mem& setDisplacement(int32_t disp) { + ASMJIT_INLINE X86Mem& setDisplacement(int32_t disp) noexcept { _vmem.displacement = disp; return *this; } //! Reset memory operand relative displacement. - ASMJIT_INLINE X86Mem& resetDisplacement() { + ASMJIT_INLINE X86Mem& resetDisplacement() noexcept { _vmem.displacement = 0; return *this; } //! Adjust memory operand relative displacement by `disp`. 
- ASMJIT_INLINE X86Mem& adjust(int32_t disp) { + ASMJIT_INLINE X86Mem& adjust(int32_t disp) noexcept { _vmem.displacement += disp; return *this; } //! Get new memory operand adjusted by `disp`. - ASMJIT_INLINE X86Mem adjusted(int32_t disp) const { + ASMJIT_INLINE X86Mem adjusted(int32_t disp) const noexcept { X86Mem result(*this); result.adjust(disp); return result; @@ -1629,24 +1621,24 @@ struct X86Mem : public BaseMem { // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86Mem& operator=(const X86Mem& other) { + ASMJIT_INLINE X86Mem& operator=(const X86Mem& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const X86Mem& other) const { - return (_packed[0] == other._packed[0]) & (_packed[1] == other._packed[1]) ; + ASMJIT_INLINE bool operator==(const X86Mem& other) const noexcept { + return (_packed[0] == other._packed[0]) && (_packed[1] == other._packed[1]) ; } - ASMJIT_INLINE bool operator!=(const X86Mem& other) const { + ASMJIT_INLINE bool operator!=(const X86Mem& other) const noexcept { return !(*this == other); } // -------------------------------------------------------------------------- - // [Static] + // [Statics] // -------------------------------------------------------------------------- - static ASMJIT_INLINE uint32_t _getGpdFlags(const Operand& base) { + static ASMJIT_INLINE uint32_t _getGpdFlags(const Operand& base) noexcept { return (base._vreg.size & 0x4) << (kX86MemGpdIndex - 2); } }; @@ -1663,54 +1655,54 @@ struct X86Var : public Var { // -------------------------------------------------------------------------- //! Create a new uninitialized `X86Var` instance. - ASMJIT_INLINE X86Var() : Var(NoInit) { reset(); } + ASMJIT_INLINE X86Var() noexcept : Var(NoInit) { reset(); } //! Create a clone of `other`. - ASMJIT_INLINE X86Var(const X86Var& other) : Var(other) {} + ASMJIT_INLINE X86Var(const X86Var& other) noexcept : Var(other) {} //! 
Create a new uninitialized `X86Var` instance (internal). - explicit ASMJIT_INLINE X86Var(const _NoInit&) : Var(NoInit) {} + explicit ASMJIT_INLINE X86Var(const _NoInit&) noexcept : Var(NoInit) {} // -------------------------------------------------------------------------- // [X86Var Specific] // -------------------------------------------------------------------------- //! Clone X86Var operand. - ASMJIT_INLINE X86Var clone() const { return X86Var(*this); } + ASMJIT_INLINE X86Var clone() const noexcept { return X86Var(*this); } // -------------------------------------------------------------------------- // [Type] // -------------------------------------------------------------------------- //! Get register type. - ASMJIT_INLINE uint32_t getRegType() const { return _vreg.type; } + ASMJIT_INLINE uint32_t getRegType() const noexcept { return _vreg.type; } //! Get variable type. - ASMJIT_INLINE uint32_t getVarType() const { return _vreg.vType; } + ASMJIT_INLINE uint32_t getVarType() const noexcept { return _vreg.vType; } //! Get whether the variable is Gp register. - ASMJIT_INLINE bool isGp() const { return _vreg.type <= kX86RegTypeGpq; } + ASMJIT_INLINE bool isGp() const noexcept { return _vreg.type <= kX86RegTypeGpq; } //! Get whether the variable is Gpb (8-bit) register. - ASMJIT_INLINE bool isGpb() const { return _vreg.type <= kX86RegTypeGpbHi; } + ASMJIT_INLINE bool isGpb() const noexcept { return _vreg.type <= kX86RegTypeGpbHi; } //! Get whether the variable is Gpb-lo (8-bit) register. - ASMJIT_INLINE bool isGpbLo() const { return _vreg.type == kX86RegTypeGpbLo; } + ASMJIT_INLINE bool isGpbLo() const noexcept { return _vreg.type == kX86RegTypeGpbLo; } //! Get whether the variable is Gpb-hi (8-bit) register. - ASMJIT_INLINE bool isGpbHi() const { return _vreg.type == kX86RegTypeGpbHi; } + ASMJIT_INLINE bool isGpbHi() const noexcept { return _vreg.type == kX86RegTypeGpbHi; } //! Get whether the variable is Gpw (16-bit) register. 
- ASMJIT_INLINE bool isGpw() const { return _vreg.type == kX86RegTypeGpw; } + ASMJIT_INLINE bool isGpw() const noexcept { return _vreg.type == kX86RegTypeGpw; } //! Get whether the variable is Gpd (32-bit) register. - ASMJIT_INLINE bool isGpd() const { return _vreg.type == kX86RegTypeGpd; } + ASMJIT_INLINE bool isGpd() const noexcept { return _vreg.type == kX86RegTypeGpd; } //! Get whether the variable is Gpq (64-bit) register. - ASMJIT_INLINE bool isGpq() const { return _vreg.type == kX86RegTypeGpq; } + ASMJIT_INLINE bool isGpq() const noexcept { return _vreg.type == kX86RegTypeGpq; } //! Get whether the variable is Mm (64-bit) register. - ASMJIT_INLINE bool isMm() const { return _vreg.type == kX86RegTypeMm; } + ASMJIT_INLINE bool isMm() const noexcept { return _vreg.type == kX86RegTypeMm; } //! Get whether the variable is K (64-bit) register. - ASMJIT_INLINE bool isK() const { return _vreg.type == kX86RegTypeK; } + ASMJIT_INLINE bool isK() const noexcept { return _vreg.type == kX86RegTypeK; } //! Get whether the variable is Xmm (128-bit) register. - ASMJIT_INLINE bool isXmm() const { return _vreg.type == kX86RegTypeXmm; } + ASMJIT_INLINE bool isXmm() const noexcept { return _vreg.type == kX86RegTypeXmm; } //! Get whether the variable is Ymm (256-bit) register. - ASMJIT_INLINE bool isYmm() const { return _vreg.type == kX86RegTypeYmm; } + ASMJIT_INLINE bool isYmm() const noexcept { return _vreg.type == kX86RegTypeYmm; } //! Get whether the variable is Zmm (512-bit) register. - ASMJIT_INLINE bool isZmm() const { return _vreg.type == kX86RegTypeZmm; } + ASMJIT_INLINE bool isZmm() const noexcept { return _vreg.type == kX86RegTypeZmm; } // -------------------------------------------------------------------------- // [Memory Cast] @@ -1720,92 +1712,92 @@ struct X86Var : public Var { //! //! \note Size of operand depends on native variable type, you can use other //! variants if you want specific one. 
- ASMJIT_INLINE X86Mem m(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, getSize()); } //! \overload - ASMJIT_INLINE X86Mem m(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, getSize()); } //! Cast this variable to 8-bit memory operand. - ASMJIT_INLINE X86Mem m8(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m8(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 1); } //! \overload - ASMJIT_INLINE X86Mem m8(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m8(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 1); } //! Cast this variable to 16-bit memory operand. - ASMJIT_INLINE X86Mem m16(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m16(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 2); } //! \overload - ASMJIT_INLINE X86Mem m16(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m16(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 2); } //! Cast this variable to 32-bit memory operand. - ASMJIT_INLINE X86Mem m32(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m32(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 4); } //! 
\overload - ASMJIT_INLINE X86Mem m32(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m32(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 4); } //! Cast this variable to 64-bit memory operand. - ASMJIT_INLINE X86Mem m64(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m64(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 8); } //! \overload - ASMJIT_INLINE X86Mem m64(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m64(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 8); } //! Cast this variable to 80-bit memory operand (long double). - ASMJIT_INLINE X86Mem m80(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m80(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 10); } //! \overload - ASMJIT_INLINE X86Mem m80(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m80(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 10); } //! Cast this variable to 128-bit memory operand. - ASMJIT_INLINE X86Mem m128(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m128(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 16); } //! \overload - ASMJIT_INLINE X86Mem m128(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m128(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 16); } //! Cast this variable to 256-bit memory operand. 
- ASMJIT_INLINE X86Mem m256(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m256(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 32); } //! \overload - ASMJIT_INLINE X86Mem m256(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m256(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 32); } //! Cast this variable to 256-bit memory operand. - ASMJIT_INLINE X86Mem m512(int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m512(int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, disp, 64); } //! \overload - ASMJIT_INLINE X86Mem m512(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const { + ASMJIT_INLINE X86Mem m512(const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) const noexcept { return X86Mem(Init, kMemTypeStackIndex, *this, index, shift, disp, 64); } @@ -1813,16 +1805,16 @@ struct X86Var : public Var { // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86Var& operator=(const X86Var& other) { + ASMJIT_INLINE X86Var& operator=(const X86Var& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const X86Var& other) const { + ASMJIT_INLINE bool operator==(const X86Var& other) const noexcept { return _packed[0] == other._packed[0]; } - ASMJIT_INLINE bool operator!=(const X86Var& other) const { + ASMJIT_INLINE bool operator!=(const X86Var& other) const noexcept { return _packed[0] != other._packed[0]; } @@ -1831,8 +1823,8 @@ struct X86Var : public Var { // -------------------------------------------------------------------------- protected: - ASMJIT_INLINE X86Var(const X86Var& other, uint32_t reg, uint32_t size) : Var(NoInit) { - _init_packed_op_sz_w0_id(kOperandTypeVar, size, (reg << 8) + other._vreg.index, other._base.id); + ASMJIT_INLINE X86Var(const 
X86Var& other, uint32_t reg, uint32_t size) noexcept : Var(NoInit) { + _init_packed_op_sz_w0_id(kTypeVar, size, (reg << 8) + other._vreg.index, other._base.id); _vreg.vType = other._vreg.vType; } }; @@ -1850,25 +1842,25 @@ struct X86GpVar : public X86Var { // -------------------------------------------------------------------------- protected: - ASMJIT_INLINE X86GpVar(const X86GpVar& other, uint32_t reg, uint32_t size) + ASMJIT_INLINE X86GpVar(const X86GpVar& other, uint32_t reg, uint32_t size) noexcept : X86Var(other, reg, size) {} public: //! Create a new uninitialized `X86GpVar` instance. - ASMJIT_INLINE X86GpVar() : X86Var() {} + ASMJIT_INLINE X86GpVar() noexcept : X86Var() {} //! Create a clone of `other`. - ASMJIT_INLINE X86GpVar(const X86GpVar& other) : X86Var(other) {} + ASMJIT_INLINE X86GpVar(const X86GpVar& other) noexcept : X86Var(other) {} //! Create a new uninitialized `X86GpVar` instance (internal). - explicit ASMJIT_INLINE X86GpVar(const _NoInit&) : X86Var(NoInit) {} + explicit ASMJIT_INLINE X86GpVar(const _NoInit&) noexcept : X86Var(NoInit) {} // -------------------------------------------------------------------------- // [X86GpVar Specific] // -------------------------------------------------------------------------- //! Clone X86GpVar operand. - ASMJIT_INLINE X86GpVar clone() const { return X86GpVar(*this); } + ASMJIT_INLINE X86GpVar clone() const noexcept { return X86GpVar(*this); } //! Reset X86GpVar operand. - ASMJIT_INLINE void reset() { X86Var::reset(); } + ASMJIT_INLINE void reset() noexcept { X86Var::reset(); } // -------------------------------------------------------------------------- // [X86GpVar Cast] @@ -1879,32 +1871,32 @@ public: //! This function has been designed to help with maintaining code that runs //! in both 32-bit and 64-bit modes. If you have variables that have mixed //! types, use `X86GpVar::as()` to cast one type to another. 
- ASMJIT_INLINE X86GpVar as(const X86GpVar& other) const { + ASMJIT_INLINE X86GpVar as(const X86GpVar& other) const noexcept { return X86GpVar(*this, other.getRegType(), other.getSize()); } //! Cast this variable to 8-bit (LO) part of variable. - ASMJIT_INLINE X86GpVar r8() const { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } + ASMJIT_INLINE X86GpVar r8() const noexcept { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } //! Cast this variable to 8-bit (LO) part of variable. - ASMJIT_INLINE X86GpVar r8Lo() const { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } + ASMJIT_INLINE X86GpVar r8Lo() const noexcept { return X86GpVar(*this, kX86RegTypeGpbLo, 1); } //! Cast this variable to 8-bit (HI) part of variable. - ASMJIT_INLINE X86GpVar r8Hi() const { return X86GpVar(*this, kX86RegTypeGpbHi, 1); } + ASMJIT_INLINE X86GpVar r8Hi() const noexcept { return X86GpVar(*this, kX86RegTypeGpbHi, 1); } //! Cast this variable to 16-bit part of variable. - ASMJIT_INLINE X86GpVar r16() const { return X86GpVar(*this, kX86RegTypeGpw, 2); } + ASMJIT_INLINE X86GpVar r16() const noexcept { return X86GpVar(*this, kX86RegTypeGpw, 2); } //! Cast this variable to 32-bit part of variable. - ASMJIT_INLINE X86GpVar r32() const { return X86GpVar(*this, kX86RegTypeGpd, 4); } + ASMJIT_INLINE X86GpVar r32() const noexcept { return X86GpVar(*this, kX86RegTypeGpd, 4); } //! Cast this variable to 64-bit part of variable. 
- ASMJIT_INLINE X86GpVar r64() const { return X86GpVar(*this, kX86RegTypeGpq, 8); } + ASMJIT_INLINE X86GpVar r64() const noexcept { return X86GpVar(*this, kX86RegTypeGpq, 8); } // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86GpVar& operator=(const X86GpVar& other) { _copy(other); return *this; } + ASMJIT_INLINE X86GpVar& operator=(const X86GpVar& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const X86GpVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86GpVar& other) const { return X86Var::operator!=(other); } + ASMJIT_INLINE bool operator==(const X86GpVar& other) const noexcept { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86GpVar& other) const noexcept { return X86Var::operator!=(other); } }; #endif // !ASMJIT_DISABLE_COMPILER @@ -1920,30 +1912,30 @@ struct X86MmVar : public X86Var { // -------------------------------------------------------------------------- //! Create a new uninitialized `X86MmVar` instance. - ASMJIT_INLINE X86MmVar() : X86Var() {} + ASMJIT_INLINE X86MmVar() noexcept : X86Var() {} //! Create a clone of `other`. - ASMJIT_INLINE X86MmVar(const X86MmVar& other) : X86Var(other) {} + ASMJIT_INLINE X86MmVar(const X86MmVar& other) noexcept : X86Var(other) {} //! Create a new uninitialized `X86MmVar` instance (internal). - explicit ASMJIT_INLINE X86MmVar(const _NoInit&) : X86Var(NoInit) {} + explicit ASMJIT_INLINE X86MmVar(const _NoInit&) noexcept : X86Var(NoInit) {} // -------------------------------------------------------------------------- // [X86MmVar Specific] // -------------------------------------------------------------------------- //! Clone X86MmVar operand. 
- ASMJIT_INLINE X86MmVar clone() const { return X86MmVar(*this); } + ASMJIT_INLINE X86MmVar clone() const noexcept { return X86MmVar(*this); } //! Reset X86MmVar operand. - ASMJIT_INLINE void reset() { X86Var::reset(); } + ASMJIT_INLINE void reset() noexcept { X86Var::reset(); } // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86MmVar& operator=(const X86MmVar& other) { _copy(other); return *this; } + ASMJIT_INLINE X86MmVar& operator=(const X86MmVar& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const X86MmVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86MmVar& other) const { return X86Var::operator!=(other); } + ASMJIT_INLINE bool operator==(const X86MmVar& other) const noexcept { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86MmVar& other) const noexcept { return X86Var::operator!=(other); } }; #endif // !ASMJIT_DISABLE_COMPILER @@ -1959,7 +1951,7 @@ struct X86XmmVar : public X86Var { // -------------------------------------------------------------------------- protected: - ASMJIT_INLINE X86XmmVar(const X86Var& other, uint32_t reg, uint32_t size) + ASMJIT_INLINE X86XmmVar(const X86Var& other, uint32_t reg, uint32_t size) noexcept : X86Var(other, reg, size) {} friend struct X86YmmVar; @@ -1967,37 +1959,37 @@ protected: public: //! Create a new uninitialized `X86XmmVar` instance. - ASMJIT_INLINE X86XmmVar() : X86Var() {} + ASMJIT_INLINE X86XmmVar() noexcept : X86Var() {} //! Create a clone of `other`. - ASMJIT_INLINE X86XmmVar(const X86XmmVar& other) : X86Var(other) {} + ASMJIT_INLINE X86XmmVar(const X86XmmVar& other) noexcept : X86Var(other) {} //! Create a new uninitialized `X86XmmVar` instance (internal). 
- explicit ASMJIT_INLINE X86XmmVar(const _NoInit&) : X86Var(NoInit) {} + explicit ASMJIT_INLINE X86XmmVar(const _NoInit&) noexcept : X86Var(NoInit) {} // -------------------------------------------------------------------------- // [X86XmmVar Specific] // -------------------------------------------------------------------------- //! Clone X86XmmVar operand. - ASMJIT_INLINE X86XmmVar clone() const { return X86XmmVar(*this); } + ASMJIT_INLINE X86XmmVar clone() const noexcept { return X86XmmVar(*this); } //! Reset X86XmmVar operand. - ASMJIT_INLINE void reset() { X86Var::reset(); } + ASMJIT_INLINE void reset() noexcept { X86Var::reset(); } // -------------------------------------------------------------------------- // [X86XmmVar Cast] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86XmmVar xmm() const { return X86XmmVar(*this); } - ASMJIT_INLINE X86YmmVar ymm() const; - ASMJIT_INLINE X86ZmmVar zmm() const; + ASMJIT_INLINE X86XmmVar xmm() const noexcept { return X86XmmVar(*this); } + ASMJIT_INLINE X86YmmVar ymm() const noexcept; + ASMJIT_INLINE X86ZmmVar zmm() const noexcept; // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86XmmVar& operator=(const X86XmmVar& other) { _copy(other); return *this; } + ASMJIT_INLINE X86XmmVar& operator=(const X86XmmVar& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const X86XmmVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86XmmVar& other) const { return X86Var::operator!=(other); } + ASMJIT_INLINE bool operator==(const X86XmmVar& other) const noexcept { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86XmmVar& other) const noexcept { return X86Var::operator!=(other); } }; #endif // !ASMJIT_DISABLE_COMPILER @@ -2013,7 +2005,7 @@ struct 
X86YmmVar : public X86Var { // -------------------------------------------------------------------------- protected: - ASMJIT_INLINE X86YmmVar(const X86Var& other, uint32_t reg, uint32_t size) + ASMJIT_INLINE X86YmmVar(const X86Var& other, uint32_t reg, uint32_t size) noexcept : X86Var(other, reg, size) {} friend struct X86XmmVar; @@ -2021,40 +2013,40 @@ protected: public: //! Create a new uninitialized `X86YmmVar` instance. - ASMJIT_INLINE X86YmmVar() : X86Var() {} + ASMJIT_INLINE X86YmmVar() noexcept : X86Var() {} //! Create a clone of `other`. - ASMJIT_INLINE X86YmmVar(const X86YmmVar& other) : X86Var(other) {} + ASMJIT_INLINE X86YmmVar(const X86YmmVar& other) noexcept : X86Var(other) {} //! Create a new uninitialized `X86YmmVar` instance (internal). - explicit ASMJIT_INLINE X86YmmVar(const _NoInit&) : X86Var(NoInit) {} + explicit ASMJIT_INLINE X86YmmVar(const _NoInit&) noexcept : X86Var(NoInit) {} // -------------------------------------------------------------------------- // [X86YmmVar Specific] // -------------------------------------------------------------------------- //! Clone X86YmmVar operand. - ASMJIT_INLINE X86YmmVar clone() const { return X86YmmVar(*this); } + ASMJIT_INLINE X86YmmVar clone() const noexcept { return X86YmmVar(*this); } //! Reset X86YmmVar operand. 
- ASMJIT_INLINE void reset() { X86Var::reset(); } + ASMJIT_INLINE void reset() noexcept { X86Var::reset(); } // -------------------------------------------------------------------------- // [X86YmmVar Cast] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86XmmVar xmm() const { return X86XmmVar(*this, kX86RegTypeXmm, 8); } - ASMJIT_INLINE X86YmmVar ymm() const { return X86YmmVar(*this); } - ASMJIT_INLINE X86ZmmVar zmm() const; + ASMJIT_INLINE X86XmmVar xmm() const noexcept { return X86XmmVar(*this, kX86RegTypeXmm, 8); } + ASMJIT_INLINE X86YmmVar ymm() const noexcept { return X86YmmVar(*this); } + ASMJIT_INLINE X86ZmmVar zmm() const noexcept; // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86YmmVar& operator=(const X86YmmVar& other) { _copy(other); return *this; } + ASMJIT_INLINE X86YmmVar& operator=(const X86YmmVar& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const X86YmmVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86YmmVar& other) const { return X86Var::operator!=(other); } + ASMJIT_INLINE bool operator==(const X86YmmVar& other) const noexcept { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86YmmVar& other) const noexcept { return X86Var::operator!=(other); } }; -ASMJIT_INLINE X86YmmVar X86XmmVar::ymm() const { return X86YmmVar(*this, kX86RegTypeYmm, 16); } +ASMJIT_INLINE X86YmmVar X86XmmVar::ymm() const noexcept { return X86YmmVar(*this, kX86RegTypeYmm, 16); } #endif // !ASMJIT_DISABLE_COMPILER // ============================================================================ @@ -2069,7 +2061,7 @@ struct X86ZmmVar : public X86Var { // -------------------------------------------------------------------------- protected: - ASMJIT_INLINE X86ZmmVar(const X86Var& 
other, uint32_t reg, uint32_t size) + ASMJIT_INLINE X86ZmmVar(const X86Var& other, uint32_t reg, uint32_t size) noexcept : X86Var(other, reg, size) {} friend struct X86XmmVar; @@ -2077,68 +2069,68 @@ protected: public: //! Create a new uninitialized `X86ZmmVar` instance. - ASMJIT_INLINE X86ZmmVar() : X86Var() {} + ASMJIT_INLINE X86ZmmVar() noexcept : X86Var() {} //! Create a clone of `other`. - ASMJIT_INLINE X86ZmmVar(const X86ZmmVar& other) : X86Var(other) {} + ASMJIT_INLINE X86ZmmVar(const X86ZmmVar& other) noexcept : X86Var(other) {} //! Create a new uninitialized `X86ZmmVar` instance (internal). - explicit ASMJIT_INLINE X86ZmmVar(const _NoInit&) : X86Var(NoInit) {} + explicit ASMJIT_INLINE X86ZmmVar(const _NoInit&) noexcept : X86Var(NoInit) {} // -------------------------------------------------------------------------- // [X86ZmmVar Specific] // -------------------------------------------------------------------------- //! Clone X86ZmmVar operand. - ASMJIT_INLINE X86ZmmVar clone() const { return X86ZmmVar(*this); } + ASMJIT_INLINE X86ZmmVar clone() const noexcept { return X86ZmmVar(*this); } //! Reset X86ZmmVar operand. 
- ASMJIT_INLINE void reset() { X86Var::reset(); } + ASMJIT_INLINE void reset() noexcept { X86Var::reset(); } // -------------------------------------------------------------------------- // [X86ZmmVar Cast] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86XmmVar xmm() const { return X86XmmVar(*this, kX86RegTypeYmm, 8); } - ASMJIT_INLINE X86YmmVar ymm() const { return X86YmmVar(*this, kX86RegTypeYmm, 16); } - ASMJIT_INLINE X86ZmmVar zmm() const { return X86ZmmVar(*this); } + ASMJIT_INLINE X86XmmVar xmm() const noexcept { return X86XmmVar(*this, kX86RegTypeYmm, 8); } + ASMJIT_INLINE X86YmmVar ymm() const noexcept { return X86YmmVar(*this, kX86RegTypeYmm, 16); } + ASMJIT_INLINE X86ZmmVar zmm() const noexcept { return X86ZmmVar(*this); } // -------------------------------------------------------------------------- // [Operator Overload] // -------------------------------------------------------------------------- - ASMJIT_INLINE X86ZmmVar& operator=(const X86ZmmVar& other) { _copy(other); return *this; } + ASMJIT_INLINE X86ZmmVar& operator=(const X86ZmmVar& other) noexcept { _copy(other); return *this; } - ASMJIT_INLINE bool operator==(const X86ZmmVar& other) const { return X86Var::operator==(other); } - ASMJIT_INLINE bool operator!=(const X86ZmmVar& other) const { return X86Var::operator!=(other); } + ASMJIT_INLINE bool operator==(const X86ZmmVar& other) const noexcept { return X86Var::operator==(other); } + ASMJIT_INLINE bool operator!=(const X86ZmmVar& other) const noexcept { return X86Var::operator!=(other); } }; -ASMJIT_INLINE X86ZmmVar X86XmmVar::zmm() const { return X86ZmmVar(*this, kX86RegTypeZmm, 32); } -ASMJIT_INLINE X86ZmmVar X86YmmVar::zmm() const { return X86ZmmVar(*this, kX86RegTypeZmm, 32); } +ASMJIT_INLINE X86ZmmVar X86XmmVar::zmm() const noexcept { return X86ZmmVar(*this, kX86RegTypeZmm, 32); } +ASMJIT_INLINE X86ZmmVar X86YmmVar::zmm() const noexcept { return X86ZmmVar(*this, kX86RegTypeZmm, 32); } #endif 
// !ASMJIT_DISABLE_COMPILER -#endif // ASMJIT_EXPORTS_X86OPERAND_REGS +#endif // ============================================================================ // [asmjit::X86RegData] // ============================================================================ struct X86RegData { - X86RipReg rip; - X86GpReg noGp; - - X86SegReg seg[7]; - - X86GpReg gpbLo[16]; - X86GpReg gpbHi[4]; - - X86GpReg gpw[16]; X86GpReg gpd[16]; X86GpReg gpq[16]; - X86FpReg fp[8]; - X86MmReg mm[8]; - X86KReg k[8]; + X86GpReg gpbLo[16]; + X86GpReg gpbHi[4]; + X86GpReg gpw[16]; X86XmmReg xmm[32]; X86YmmReg ymm[32]; X86ZmmReg zmm[32]; + + X86KReg k[8]; + X86FpReg fp[8]; + X86MmReg mm[8]; + + X86SegReg seg[7]; + + X86GpReg noGp; + X86RipReg rip; }; ASMJIT_VARAPI const X86RegData x86RegData; @@ -2146,8 +2138,8 @@ ASMJIT_VARAPI const X86RegData x86RegData; // [asmjit::x86] // ============================================================================ -// This is only defined by `x86operand_regs.cpp` when exporting registers. -#if !defined(ASMJIT_EXPORTS_X86OPERAND_REGS) +// The macro is only defined by `x86operand_regs.cpp` when exporting registers. +#if !defined(ASMJIT_EXPORTS_X86_REGS) namespace x86 { @@ -2158,15 +2150,39 @@ namespace x86 { #define ASMJIT_DEF_REG(_Type_, _Name_, _Field_) \ static const _Type_& _Name_ = x86RegData._Field_; -ASMJIT_DEF_REG(X86RipReg, rip, rip) //!< RIP register. -ASMJIT_DEF_REG(X86GpReg , noGpReg, noGp) //!< No GP register (for `X86Mem` operand). +ASMJIT_DEF_REG(X86GpReg , eax , gpd[0]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , ecx , gpd[1]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , edx , gpd[2]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , ebx , gpd[3]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , esp , gpd[4]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , ebp , gpd[5]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , esi , gpd[6]) //!< 32-bit Gpd register. 
+ASMJIT_DEF_REG(X86GpReg , edi , gpd[7]) //!< 32-bit Gpd register. +ASMJIT_DEF_REG(X86GpReg , r8d , gpd[8]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r9d , gpd[9]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r10d , gpd[10]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r11d , gpd[11]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r12d , gpd[12]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r13d , gpd[13]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r14d , gpd[14]) //!< 32-bit Gpd register (X64). +ASMJIT_DEF_REG(X86GpReg , r15d , gpd[15]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86SegReg, es , seg[1]) //!< Cs segment register. -ASMJIT_DEF_REG(X86SegReg, cs , seg[2]) //!< Ss segment register. -ASMJIT_DEF_REG(X86SegReg, ss , seg[3]) //!< Ds segment register. -ASMJIT_DEF_REG(X86SegReg, ds , seg[4]) //!< Es segment register. -ASMJIT_DEF_REG(X86SegReg, fs , seg[5]) //!< Fs segment register. -ASMJIT_DEF_REG(X86SegReg, gs , seg[6]) //!< Gs segment register. +ASMJIT_DEF_REG(X86GpReg , rax , gpq[0]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , rcx , gpq[1]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , rdx , gpq[2]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , rbx , gpq[3]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , rsp , gpq[4]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , rbp , gpq[5]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , rsi , gpq[6]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , rdi , gpq[7]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , r8 , gpq[8]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , r9 , gpq[9]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , r10 , gpq[10]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , r11 , gpq[11]) //!< 64-bit Gpq register (X64). 
+ASMJIT_DEF_REG(X86GpReg , r12 , gpq[12]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , r13 , gpq[13]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , r14 , gpq[14]) //!< 64-bit Gpq register (X64). +ASMJIT_DEF_REG(X86GpReg , r15 , gpq[15]) //!< 64-bit Gpq register (X64). ASMJIT_DEF_REG(X86GpReg , al , gpbLo[0]) //!< 8-bit Gpb-lo register. ASMJIT_DEF_REG(X86GpReg , cl , gpbLo[1]) //!< 8-bit Gpb-lo register. @@ -2207,67 +2223,6 @@ ASMJIT_DEF_REG(X86GpReg , r13w , gpw[13]) //!< 16-bit Gpw register (X64). ASMJIT_DEF_REG(X86GpReg , r14w , gpw[14]) //!< 16-bit Gpw register (X64). ASMJIT_DEF_REG(X86GpReg , r15w , gpw[15]) //!< 16-bit Gpw register (X64). -ASMJIT_DEF_REG(X86GpReg , eax , gpd[0]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , ecx , gpd[1]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , edx , gpd[2]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , ebx , gpd[3]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , esp , gpd[4]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , ebp , gpd[5]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , esi , gpd[6]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , edi , gpd[7]) //!< 32-bit Gpd register. -ASMJIT_DEF_REG(X86GpReg , r8d , gpd[8]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r9d , gpd[9]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r10d , gpd[10]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r11d , gpd[11]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r12d , gpd[12]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r13d , gpd[13]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r14d , gpd[14]) //!< 32-bit Gpd register (X64). -ASMJIT_DEF_REG(X86GpReg , r15d , gpd[15]) //!< 32-bit Gpd register (X64). - -ASMJIT_DEF_REG(X86GpReg , rax , gpq[0]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , rcx , gpq[1]) //!< 64-bit Gpq register (X64). 
-ASMJIT_DEF_REG(X86GpReg , rdx , gpq[2]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , rbx , gpq[3]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , rsp , gpq[4]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , rbp , gpq[5]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , rsi , gpq[6]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , rdi , gpq[7]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r8 , gpq[8]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r9 , gpq[9]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r10 , gpq[10]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r11 , gpq[11]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r12 , gpq[12]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r13 , gpq[13]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r14 , gpq[14]) //!< 64-bit Gpq register (X64). -ASMJIT_DEF_REG(X86GpReg , r15 , gpq[15]) //!< 64-bit Gpq register (X64). - -ASMJIT_DEF_REG(X86FpReg , fp0 , fp[0]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp1 , fp[1]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp2 , fp[2]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp3 , fp[3]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp4 , fp[4]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp5 , fp[5]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp6 , fp[6]) //!< 80-bit Fp register. -ASMJIT_DEF_REG(X86FpReg , fp7 , fp[7]) //!< 80-bit Fp register. - -ASMJIT_DEF_REG(X86MmReg , mm0 , mm[0]) //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm1 , mm[1]) //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm2 , mm[2]) //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm3 , mm[3]) //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm4 , mm[4]) //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm5 , mm[5]) //!< 64-bit Mm register. 
-ASMJIT_DEF_REG(X86MmReg , mm6 , mm[6]) //!< 64-bit Mm register. -ASMJIT_DEF_REG(X86MmReg , mm7 , mm[7]) //!< 64-bit Mm register. - -ASMJIT_DEF_REG(X86KReg , k0 , k[0]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k1 , k[1]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k2 , k[2]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k3 , k[3]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k4 , k[4]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k5 , k[5]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k6 , k[6]) //!< 64-bit K register. -ASMJIT_DEF_REG(X86KReg , k7 , k[7]) //!< 64-bit K register. - ASMJIT_DEF_REG(X86XmmReg, xmm0 , xmm[0]) //!< 128-bit Xmm register. ASMJIT_DEF_REG(X86XmmReg, xmm1 , xmm[1]) //!< 128-bit Xmm register. ASMJIT_DEF_REG(X86XmmReg, xmm2 , xmm[2]) //!< 128-bit Xmm register. @@ -2366,116 +2321,154 @@ ASMJIT_DEF_REG(X86ZmmReg, zmm28, zmm[28]) //!< 512-bit Zmm register (X64 & AVX5 ASMJIT_DEF_REG(X86ZmmReg, zmm29, zmm[29]) //!< 512-bit Zmm register (X64 & AVX512+). ASMJIT_DEF_REG(X86ZmmReg, zmm30, zmm[30]) //!< 512-bit Zmm register (X64 & AVX512+). ASMJIT_DEF_REG(X86ZmmReg, zmm31, zmm[31]) //!< 512-bit Zmm register (X64 & AVX512+). + +ASMJIT_DEF_REG(X86KReg , k0 , k[0]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k1 , k[1]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k2 , k[2]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k3 , k[3]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k4 , k[4]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k5 , k[5]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k6 , k[6]) //!< 64-bit K register. +ASMJIT_DEF_REG(X86KReg , k7 , k[7]) //!< 64-bit K register. + +ASMJIT_DEF_REG(X86FpReg , fp0 , fp[0]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp1 , fp[1]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp2 , fp[2]) //!< 80-bit Fp register. +ASMJIT_DEF_REG(X86FpReg , fp3 , fp[3]) //!< 80-bit Fp register. 
+ASMJIT_DEF_REG(X86FpReg , fp4 , fp[4]) //!< 80-bit Fp register.
+ASMJIT_DEF_REG(X86FpReg , fp5 , fp[5]) //!< 80-bit Fp register.
+ASMJIT_DEF_REG(X86FpReg , fp6 , fp[6]) //!< 80-bit Fp register.
+ASMJIT_DEF_REG(X86FpReg , fp7 , fp[7]) //!< 80-bit Fp register.
+
+ASMJIT_DEF_REG(X86MmReg , mm0 , mm[0]) //!< 64-bit Mm register.
+ASMJIT_DEF_REG(X86MmReg , mm1 , mm[1]) //!< 64-bit Mm register.
+ASMJIT_DEF_REG(X86MmReg , mm2 , mm[2]) //!< 64-bit Mm register.
+ASMJIT_DEF_REG(X86MmReg , mm3 , mm[3]) //!< 64-bit Mm register.
+ASMJIT_DEF_REG(X86MmReg , mm4 , mm[4]) //!< 64-bit Mm register.
+ASMJIT_DEF_REG(X86MmReg , mm5 , mm[5]) //!< 64-bit Mm register.
+ASMJIT_DEF_REG(X86MmReg , mm6 , mm[6]) //!< 64-bit Mm register.
+ASMJIT_DEF_REG(X86MmReg , mm7 , mm[7]) //!< 64-bit Mm register.
+
+ASMJIT_DEF_REG(X86SegReg, es , seg[1]) //!< Es segment register.
+ASMJIT_DEF_REG(X86SegReg, cs , seg[2]) //!< Cs segment register.
+ASMJIT_DEF_REG(X86SegReg, ss , seg[3]) //!< Ss segment register.
+ASMJIT_DEF_REG(X86SegReg, ds , seg[4]) //!< Ds segment register.
+ASMJIT_DEF_REG(X86SegReg, fs , seg[5]) //!< Fs segment register.
+ASMJIT_DEF_REG(X86SegReg, gs , seg[6]) //!< Gs segment register.
+
+ASMJIT_DEF_REG(X86GpReg , noGpReg, noGp) //!< No GP register (for `X86Mem` operand).
+ASMJIT_DEF_REG(X86RipReg, rip, rip) //!< RIP register.
+
 #undef ASMJIT_DEF_REG
 
 //! Create 8-bit Gpb-lo register operand.
-static ASMJIT_INLINE X86GpReg gpb_lo(uint32_t index) { return X86GpReg(kX86RegTypeGpbLo, index, 1); }
+static ASMJIT_INLINE X86GpReg gpb_lo(uint32_t index) noexcept { return X86GpReg(kX86RegTypeGpbLo, index, 1); }
 //! Create 8-bit Gpb-hi register operand.
-static ASMJIT_INLINE X86GpReg gpb_hi(uint32_t index) { return X86GpReg(kX86RegTypeGpbHi, index, 1); }
+static ASMJIT_INLINE X86GpReg gpb_hi(uint32_t index) noexcept { return X86GpReg(kX86RegTypeGpbHi, index, 1); }
 //! Create 16-bit Gpw register operand.
-static ASMJIT_INLINE X86GpReg gpw(uint32_t index) { return X86GpReg(kX86RegTypeGpw, index, 2); } +static ASMJIT_INLINE X86GpReg gpw(uint32_t index) noexcept { return X86GpReg(kX86RegTypeGpw, index, 2); } //! Create 32-bit Gpd register operand. -static ASMJIT_INLINE X86GpReg gpd(uint32_t index) { return X86GpReg(kX86RegTypeGpd, index, 4); } +static ASMJIT_INLINE X86GpReg gpd(uint32_t index) noexcept { return X86GpReg(kX86RegTypeGpd, index, 4); } //! Create 64-bit Gpq register operand (X64). -static ASMJIT_INLINE X86GpReg gpq(uint32_t index) { return X86GpReg(kX86RegTypeGpq, index, 8); } +static ASMJIT_INLINE X86GpReg gpq(uint32_t index) noexcept { return X86GpReg(kX86RegTypeGpq, index, 8); } //! Create 80-bit Fp register operand. -static ASMJIT_INLINE X86FpReg fp(uint32_t index) { return X86FpReg(kX86RegTypeFp, index, 10); } +static ASMJIT_INLINE X86FpReg fp(uint32_t index) noexcept { return X86FpReg(kX86RegTypeFp, index, 10); } //! Create 64-bit Mm register operand. -static ASMJIT_INLINE X86MmReg mm(uint32_t index) { return X86MmReg(kX86RegTypeMm, index, 8); } +static ASMJIT_INLINE X86MmReg mm(uint32_t index) noexcept { return X86MmReg(kX86RegTypeMm, index, 8); } //! Create 64-bit K register operand. -static ASMJIT_INLINE X86KReg k(uint32_t index) { return X86KReg(kX86RegTypeK, index, 8); } +static ASMJIT_INLINE X86KReg k(uint32_t index) noexcept { return X86KReg(kX86RegTypeK, index, 8); } //! Create 128-bit Xmm register operand. -static ASMJIT_INLINE X86XmmReg xmm(uint32_t index) { return X86XmmReg(kX86RegTypeXmm, index, 16); } +static ASMJIT_INLINE X86XmmReg xmm(uint32_t index) noexcept { return X86XmmReg(kX86RegTypeXmm, index, 16); } //! Create 256-bit Ymm register operand. -static ASMJIT_INLINE X86YmmReg ymm(uint32_t index) { return X86YmmReg(kX86RegTypeYmm, index, 32); } +static ASMJIT_INLINE X86YmmReg ymm(uint32_t index) noexcept { return X86YmmReg(kX86RegTypeYmm, index, 32); } //! Create 512-bit Zmm register operand. 
-static ASMJIT_INLINE X86ZmmReg zmm(uint32_t index) { return X86ZmmReg(kX86RegTypeZmm, index, 64); } +static ASMJIT_INLINE X86ZmmReg zmm(uint32_t index) noexcept { return X86ZmmReg(kX86RegTypeZmm, index, 64); } // ============================================================================ // [asmjit::x86 - Ptr (Reg)] // ============================================================================ //! Create `[base.reg + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, disp, size); } //! Create `[base.reg + (index.reg << shift) + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, index, shift, disp, size); } //! Create `[base.reg + (xmm.reg << shift) + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, const X86XmmReg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, const X86XmmReg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, index, shift, disp, size); } //! Create `[base.reg + (ymm.reg << shift) + disp]` memory operand with no/custom size information. 
-static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, const X86YmmReg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpReg& base, const X86YmmReg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, index, shift, disp, size); } //! Create `[label + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const Label& label, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const Label& label, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(label, disp, size); } //! Create `[label + (index.reg << shift) + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp = 0, uint32_t size = 0) { \ +static ASMJIT_INLINE X86Mem ptr(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp = 0, uint32_t size = 0) noexcept { \ return X86Mem(label, index, shift, disp, size); \ } //! Create `[RIP + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const X86RipReg& rip_, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86RipReg& rip_, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(rip_, disp, size); } -//! Create `[pAbs + disp]` absolute memory operand with no/custom size information. -ASMJIT_API X86Mem ptr_abs(Ptr pAbs, int32_t disp = 0, uint32_t size = 0); -//! Create `[pAbs + (index.reg << shift) + disp]` absolute memory operand with no/custom size information. -ASMJIT_API X86Mem ptr_abs(Ptr pAbs, const X86Reg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0); +//! Create `[p + disp]` absolute memory operand with no/custom size information. +ASMJIT_API X86Mem ptr_abs(Ptr p, int32_t disp = 0, uint32_t size = 0) noexcept; +//! 
Create `[p + (index.reg << shift) + disp]` absolute memory operand with no/custom size information. +ASMJIT_API X86Mem ptr_abs(Ptr p, const X86Reg& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept; //! \internal #define ASMJIT_EXPAND_PTR_REG(_Prefix_, _Size_) \ /*! Create `[base.reg + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, int32_t disp = 0) noexcept { \ return X86Mem(base, disp, _Size_); \ } \ /*! Create `[base.reg + (index.reg << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ return ptr(base, index, shift, disp, _Size_); \ } \ /*! Create `[base.reg + (xmm.reg << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, const X86XmmReg& index, uint32_t shift = 0, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, const X86XmmReg& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ return ptr(base, index, shift, disp, _Size_); \ } \ /*! Create `[base.reg + (ymm.reg << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, const X86YmmReg& index, uint32_t shift = 0, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpReg& base, const X86YmmReg& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ return ptr(base, index, shift, disp, _Size_); \ } \ /*! Create `[label + disp]` memory operand. 
*/ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const Label& label, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const Label& label, int32_t disp = 0) noexcept { \ return ptr(label, disp, _Size_); \ } \ /*! Create `[label + (index.reg << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const Label& label, const X86GpReg& index, uint32_t shift, int32_t disp = 0) noexcept { \ return ptr(label, index, shift, disp, _Size_); \ } \ /*! Create `[RIP + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##ptr(const X86RipReg& rip_, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##ptr(const X86RipReg& rip_, int32_t disp = 0) noexcept { \ return ptr(rip_, disp, _Size_); \ } \ - /*! Create `[pAbs + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, int32_t disp = 0) { \ - return ptr_abs(pAbs, disp, _Size_); \ + /*! Create `[p + disp]` memory operand. */ \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr p, int32_t disp = 0) noexcept { \ + return ptr_abs(p, disp, _Size_); \ } \ - /*! Create `[pAbs + (index.reg << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0) { \ - return ptr_abs(pAbs, index, shift, disp, _Size_); \ + /*! Create `[p + (index.reg << shift) + disp]` memory operand. */ \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr p, const X86GpReg& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ + return ptr_abs(p, index, shift, disp, _Size_); \ } \ - /*! Create `[pAbs + (xmm.reg << shift) + disp]` memory operand. 
*/ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, const X86XmmReg& index, uint32_t shift = 0, int32_t disp = 0) { \ - return ptr_abs(pAbs, index, shift, disp, _Size_); \ + /*! Create `[p + (xmm.reg << shift) + disp]` memory operand. */ \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr p, const X86XmmReg& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ + return ptr_abs(p, index, shift, disp, _Size_); \ } \ - /*! Create `[pAbs + (ymm.reg << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, const X86YmmReg& index, uint32_t shift = 0, int32_t disp = 0) { \ - return ptr_abs(pAbs, index, shift, disp, _Size_); \ + /*! Create `[p + (ymm.reg << shift) + disp]` memory operand. */ \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr p, const X86YmmReg& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ + return ptr_abs(p, index, shift, disp, _Size_); \ } ASMJIT_EXPAND_PTR_REG(byte, 1) @@ -2494,62 +2487,62 @@ ASMJIT_EXPAND_PTR_REG(zword, 64) #if !defined(ASMJIT_DISABLE_COMPILER) //! Create `[base.var + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, disp, size); } //! Create `[base.var + (index.var << shift) + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, index, shift, disp, size); } //! Create `[base.var + (xmm.var << shift) + disp]` memory operand with no/custom size information. 
-static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, const X86XmmVar& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, const X86XmmVar& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, index, shift, disp, size); } //! Create `[base.var + (ymm.var << shift) + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, const X86YmmVar& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) { +static ASMJIT_INLINE X86Mem ptr(const X86GpVar& base, const X86YmmVar& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept { return X86Mem(base, index, shift, disp, size); } //! Create `[label + (index.var << shift) + disp]` memory operand with no/custom size information. -static ASMJIT_INLINE X86Mem ptr(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp = 0, uint32_t size = 0) { \ +static ASMJIT_INLINE X86Mem ptr(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp = 0, uint32_t size = 0) noexcept { \ return X86Mem(label, index, shift, disp, size); \ } -//! Create `[pAbs + (index.var << shift) + disp]` absolute memory operand with no/custom size information. -ASMJIT_API X86Mem ptr_abs(Ptr pAbs, const X86Var& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0); +//! Create `[p + (index.var << shift) + disp]` absolute memory operand with no/custom size information. +ASMJIT_API X86Mem ptr_abs(Ptr p, const X86Var& index, uint32_t shift = 0, int32_t disp = 0, uint32_t size = 0) noexcept; //! \internal #define ASMJIT_EXPAND_PTR_VAR(_Prefix_, _Size_) \ /*! Create `[base.var + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, int32_t disp = 0) noexcept { \ return X86Mem(base, disp, _Size_); \ } \ /*! 
Create `[base.var + (index.var << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ return ptr(base, index, shift, disp, _Size_); \ } \ /*! Create `[base.var + (xmm.var << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, const X86XmmVar& index, uint32_t shift = 0, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, const X86XmmVar& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ return ptr(base, index, shift, disp, _Size_); \ } \ /*! Create `[base.var + (ymm.var << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, const X86YmmVar& index, uint32_t shift = 0, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const X86GpVar& base, const X86YmmVar& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ return ptr(base, index, shift, disp, _Size_); \ } \ /*! Create `[label + (index.var << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp = 0) { \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr(const Label& label, const X86GpVar& index, uint32_t shift, int32_t disp = 0) noexcept { \ return ptr(label, index, shift, disp, _Size_); \ } \ - /*! Create `[pAbs + (index.var << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) { \ - return ptr_abs(pAbs, reinterpret_cast(index), shift, disp, _Size_); \ + /*! Create `[p + (index.var << shift) + disp]` memory operand. 
*/ \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr p, const X86GpVar& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ + return ptr_abs(p, reinterpret_cast(index), shift, disp, _Size_); \ } \ - /*! Create `[pAbs + (xmm.var << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, const X86XmmVar& index, uint32_t shift = 0, int32_t disp = 0) { \ - return ptr_abs(pAbs, reinterpret_cast(index), shift, disp, _Size_); \ + /*! Create `[p + (xmm.var << shift) + disp]` memory operand. */ \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr p, const X86XmmVar& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ + return ptr_abs(p, reinterpret_cast(index), shift, disp, _Size_); \ } \ - /*! Create `[pAbs + (ymm.var << shift) + disp]` memory operand. */ \ - static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr pAbs, const X86YmmVar& index, uint32_t shift = 0, int32_t disp = 0) { \ - return ptr_abs(pAbs, reinterpret_cast(index), shift, disp, _Size_); \ + /*! Create `[p + (ymm.var << shift) + disp]` memory operand. */ \ + static ASMJIT_INLINE X86Mem _Prefix_##_ptr##_abs(Ptr p, const X86YmmVar& index, uint32_t shift = 0, int32_t disp = 0) noexcept { \ + return ptr_abs(p, reinterpret_cast(index), shift, disp, _Size_); \ } ASMJIT_EXPAND_PTR_VAR(byte, 1) @@ -2566,7 +2559,7 @@ ASMJIT_EXPAND_PTR_VAR(zword, 64) } // x86 namespace -#endif // !ASMJIT_EXPORTS_X86OPERAND_REGS +#endif // !ASMJIT_EXPORTS_X86_REGS //! 
\} diff --git a/src/asmjit/x86/x86operand_regs.cpp b/src/asmjit/x86/x86operand_regs.cpp index 4bcd327..d6af196 100644 --- a/src/asmjit/x86/x86operand_regs.cpp +++ b/src/asmjit/x86/x86operand_regs.cpp @@ -6,7 +6,7 @@ // [Export] #define ASMJIT_EXPORTS -#define ASMJIT_EXPORTS_X86OPERAND_REGS +#define ASMJIT_EXPORTS_X86_REGS // [Guard] #include "../build.h" @@ -21,24 +21,48 @@ namespace asmjit { #define REG(_Type_, _Index_, _Size_) {{{ \ - kOperandTypeReg, _Size_, { ((_Type_) << 8) + _Index_ }, kInvalidValue, {{ kInvalidVar, 0 }} \ + Operand::kTypeReg, _Size_, { ((_Type_) << 8) + _Index_ }, kInvalidValue, {{ kInvalidVar, 0 }} \ }}} const X86RegData x86RegData = { - // RIP. - REG(kX86RegTypeRip, 0, 0), - // NpGp. - REG(kInvalidReg, kInvalidReg, 0), - - // Segments. + // Gpd. { - REG(kX86RegTypeSeg, 0, 2), // Default. - REG(kX86RegTypeSeg, 1, 2), // ES. - REG(kX86RegTypeSeg, 2, 2), // CS. - REG(kX86RegTypeSeg, 3, 2), // SS. - REG(kX86RegTypeSeg, 4, 2), // DS. - REG(kX86RegTypeSeg, 5, 2), // FS. - REG(kX86RegTypeSeg, 6, 2) // GS. + REG(kX86RegTypeGpd, 0 , 4), + REG(kX86RegTypeGpd, 1 , 4), + REG(kX86RegTypeGpd, 2 , 4), + REG(kX86RegTypeGpd, 3 , 4), + REG(kX86RegTypeGpd, 4 , 4), + REG(kX86RegTypeGpd, 5 , 4), + REG(kX86RegTypeGpd, 6 , 4), + REG(kX86RegTypeGpd, 7 , 4), + REG(kX86RegTypeGpd, 8 , 4), + REG(kX86RegTypeGpd, 9 , 4), + REG(kX86RegTypeGpd, 10, 4), + REG(kX86RegTypeGpd, 11, 4), + REG(kX86RegTypeGpd, 12, 4), + REG(kX86RegTypeGpd, 13, 4), + REG(kX86RegTypeGpd, 14, 4), + REG(kX86RegTypeGpd, 15, 4) + }, + + // Gpq. 
+ { + REG(kX86RegTypeGpq, 0 , 8), + REG(kX86RegTypeGpq, 1 , 8), + REG(kX86RegTypeGpq, 2 , 8), + REG(kX86RegTypeGpq, 3 , 8), + REG(kX86RegTypeGpq, 4 , 8), + REG(kX86RegTypeGpq, 5 , 8), + REG(kX86RegTypeGpq, 6 , 8), + REG(kX86RegTypeGpq, 7 , 8), + REG(kX86RegTypeGpq, 8 , 8), + REG(kX86RegTypeGpq, 9 , 8), + REG(kX86RegTypeGpq, 10, 8), + REG(kX86RegTypeGpq, 11, 8), + REG(kX86RegTypeGpq, 12, 8), + REG(kX86RegTypeGpq, 13, 8), + REG(kX86RegTypeGpq, 14, 8), + REG(kX86RegTypeGpq, 15, 8) }, // GpbLo. @@ -71,16 +95,16 @@ const X86RegData x86RegData = { // Gpw. { - REG(kX86RegTypeGpw, 0, 2), - REG(kX86RegTypeGpw, 1, 2), - REG(kX86RegTypeGpw, 2, 2), - REG(kX86RegTypeGpw, 3, 2), - REG(kX86RegTypeGpw, 4, 2), - REG(kX86RegTypeGpw, 5, 2), - REG(kX86RegTypeGpw, 6, 2), - REG(kX86RegTypeGpw, 7, 2), - REG(kX86RegTypeGpw, 8, 2), - REG(kX86RegTypeGpw, 9, 2), + REG(kX86RegTypeGpw, 0 , 2), + REG(kX86RegTypeGpw, 1 , 2), + REG(kX86RegTypeGpw, 2 , 2), + REG(kX86RegTypeGpw, 3 , 2), + REG(kX86RegTypeGpw, 4 , 2), + REG(kX86RegTypeGpw, 5 , 2), + REG(kX86RegTypeGpw, 6 , 2), + REG(kX86RegTypeGpw, 7 , 2), + REG(kX86RegTypeGpw, 8 , 2), + REG(kX86RegTypeGpw, 9 , 2), REG(kX86RegTypeGpw, 10, 2), REG(kX86RegTypeGpw, 11, 2), REG(kX86RegTypeGpw, 12, 2), @@ -89,94 +113,18 @@ const X86RegData x86RegData = { REG(kX86RegTypeGpw, 15, 2) }, - // Gpd. - { - REG(kX86RegTypeGpd, 0, 4), - REG(kX86RegTypeGpd, 1, 4), - REG(kX86RegTypeGpd, 2, 4), - REG(kX86RegTypeGpd, 3, 4), - REG(kX86RegTypeGpd, 4, 4), - REG(kX86RegTypeGpd, 5, 4), - REG(kX86RegTypeGpd, 6, 4), - REG(kX86RegTypeGpd, 7, 4), - REG(kX86RegTypeGpd, 8, 4), - REG(kX86RegTypeGpd, 9, 4), - REG(kX86RegTypeGpd, 10, 4), - REG(kX86RegTypeGpd, 11, 4), - REG(kX86RegTypeGpd, 12, 4), - REG(kX86RegTypeGpd, 13, 4), - REG(kX86RegTypeGpd, 14, 4), - REG(kX86RegTypeGpd, 15, 4) - }, - - // Gpq. 
- { - REG(kX86RegTypeGpq, 0, 8), - REG(kX86RegTypeGpq, 1, 8), - REG(kX86RegTypeGpq, 2, 8), - REG(kX86RegTypeGpq, 3, 8), - REG(kX86RegTypeGpq, 4, 8), - REG(kX86RegTypeGpq, 5, 8), - REG(kX86RegTypeGpq, 6, 8), - REG(kX86RegTypeGpq, 7, 8), - REG(kX86RegTypeGpq, 8, 8), - REG(kX86RegTypeGpq, 9, 8), - REG(kX86RegTypeGpq, 10, 8), - REG(kX86RegTypeGpq, 11, 8), - REG(kX86RegTypeGpq, 12, 8), - REG(kX86RegTypeGpq, 13, 8), - REG(kX86RegTypeGpq, 14, 8), - REG(kX86RegTypeGpq, 15, 8) - }, - - // Fp. - { - REG(kX86RegTypeFp, 0, 10), - REG(kX86RegTypeFp, 1, 10), - REG(kX86RegTypeFp, 2, 10), - REG(kX86RegTypeFp, 3, 10), - REG(kX86RegTypeFp, 4, 10), - REG(kX86RegTypeFp, 5, 10), - REG(kX86RegTypeFp, 6, 10), - REG(kX86RegTypeFp, 7, 10) - }, - - // Mm. - { - REG(kX86RegTypeMm, 0, 8), - REG(kX86RegTypeMm, 1, 8), - REG(kX86RegTypeMm, 2, 8), - REG(kX86RegTypeMm, 3, 8), - REG(kX86RegTypeMm, 4, 8), - REG(kX86RegTypeMm, 5, 8), - REG(kX86RegTypeMm, 6, 8), - REG(kX86RegTypeMm, 7, 8) - }, - - // K. - { - REG(kX86RegTypeK, 0, 8), - REG(kX86RegTypeK, 1, 8), - REG(kX86RegTypeK, 2, 8), - REG(kX86RegTypeK, 3, 8), - REG(kX86RegTypeK, 4, 8), - REG(kX86RegTypeK, 5, 8), - REG(kX86RegTypeK, 6, 8), - REG(kX86RegTypeK, 7, 8) - }, - // Xmm. { - REG(kX86RegTypeXmm, 0, 16), - REG(kX86RegTypeXmm, 1, 16), - REG(kX86RegTypeXmm, 2, 16), - REG(kX86RegTypeXmm, 3, 16), - REG(kX86RegTypeXmm, 4, 16), - REG(kX86RegTypeXmm, 5, 16), - REG(kX86RegTypeXmm, 6, 16), - REG(kX86RegTypeXmm, 7, 16), - REG(kX86RegTypeXmm, 8, 16), - REG(kX86RegTypeXmm, 9, 16), + REG(kX86RegTypeXmm, 0 , 16), + REG(kX86RegTypeXmm, 1 , 16), + REG(kX86RegTypeXmm, 2 , 16), + REG(kX86RegTypeXmm, 3 , 16), + REG(kX86RegTypeXmm, 4 , 16), + REG(kX86RegTypeXmm, 5 , 16), + REG(kX86RegTypeXmm, 6 , 16), + REG(kX86RegTypeXmm, 7 , 16), + REG(kX86RegTypeXmm, 8 , 16), + REG(kX86RegTypeXmm, 9 , 16), REG(kX86RegTypeXmm, 10, 16), REG(kX86RegTypeXmm, 11, 16), REG(kX86RegTypeXmm, 12, 16), @@ -203,16 +151,16 @@ const X86RegData x86RegData = { // Ymm. 
{ - REG(kX86RegTypeYmm, 0, 32), - REG(kX86RegTypeYmm, 1, 32), - REG(kX86RegTypeYmm, 2, 32), - REG(kX86RegTypeYmm, 3, 32), - REG(kX86RegTypeYmm, 4, 32), - REG(kX86RegTypeYmm, 5, 32), - REG(kX86RegTypeYmm, 6, 32), - REG(kX86RegTypeYmm, 7, 32), - REG(kX86RegTypeYmm, 8, 32), - REG(kX86RegTypeYmm, 9, 32), + REG(kX86RegTypeYmm, 0 , 32), + REG(kX86RegTypeYmm, 1 , 32), + REG(kX86RegTypeYmm, 2 , 32), + REG(kX86RegTypeYmm, 3 , 32), + REG(kX86RegTypeYmm, 4 , 32), + REG(kX86RegTypeYmm, 5 , 32), + REG(kX86RegTypeYmm, 6 , 32), + REG(kX86RegTypeYmm, 7 , 32), + REG(kX86RegTypeYmm, 8 , 32), + REG(kX86RegTypeYmm, 9 , 32), REG(kX86RegTypeYmm, 10, 32), REG(kX86RegTypeYmm, 11, 32), REG(kX86RegTypeYmm, 12, 32), @@ -239,16 +187,16 @@ const X86RegData x86RegData = { // Zmm. { - REG(kX86RegTypeZmm, 0, 64), - REG(kX86RegTypeZmm, 1, 64), - REG(kX86RegTypeZmm, 2, 64), - REG(kX86RegTypeZmm, 3, 64), - REG(kX86RegTypeZmm, 4, 64), - REG(kX86RegTypeZmm, 5, 64), - REG(kX86RegTypeZmm, 6, 64), - REG(kX86RegTypeZmm, 7, 64), - REG(kX86RegTypeZmm, 8, 64), - REG(kX86RegTypeZmm, 9, 64), + REG(kX86RegTypeZmm, 0 , 64), + REG(kX86RegTypeZmm, 1 , 64), + REG(kX86RegTypeZmm, 2 , 64), + REG(kX86RegTypeZmm, 3 , 64), + REG(kX86RegTypeZmm, 4 , 64), + REG(kX86RegTypeZmm, 5 , 64), + REG(kX86RegTypeZmm, 6 , 64), + REG(kX86RegTypeZmm, 7 , 64), + REG(kX86RegTypeZmm, 8 , 64), + REG(kX86RegTypeZmm, 9 , 64), REG(kX86RegTypeZmm, 10, 64), REG(kX86RegTypeZmm, 11, 64), REG(kX86RegTypeZmm, 12, 64), @@ -271,7 +219,59 @@ const X86RegData x86RegData = { REG(kX86RegTypeZmm, 29, 64), REG(kX86RegTypeZmm, 30, 64), REG(kX86RegTypeZmm, 31, 64) - } + }, + + // K. + { + REG(kX86RegTypeK, 0, 8), + REG(kX86RegTypeK, 1, 8), + REG(kX86RegTypeK, 2, 8), + REG(kX86RegTypeK, 3, 8), + REG(kX86RegTypeK, 4, 8), + REG(kX86RegTypeK, 5, 8), + REG(kX86RegTypeK, 6, 8), + REG(kX86RegTypeK, 7, 8) + }, + + // Fp. 
+ { + REG(kX86RegTypeFp, 0, 10), + REG(kX86RegTypeFp, 1, 10), + REG(kX86RegTypeFp, 2, 10), + REG(kX86RegTypeFp, 3, 10), + REG(kX86RegTypeFp, 4, 10), + REG(kX86RegTypeFp, 5, 10), + REG(kX86RegTypeFp, 6, 10), + REG(kX86RegTypeFp, 7, 10) + }, + + // Mm. + { + REG(kX86RegTypeMm, 0, 8), + REG(kX86RegTypeMm, 1, 8), + REG(kX86RegTypeMm, 2, 8), + REG(kX86RegTypeMm, 3, 8), + REG(kX86RegTypeMm, 4, 8), + REG(kX86RegTypeMm, 5, 8), + REG(kX86RegTypeMm, 6, 8), + REG(kX86RegTypeMm, 7, 8) + }, + + // Segments. + { + REG(kX86RegTypeSeg, 0, 2), // Default. + REG(kX86RegTypeSeg, 1, 2), // ES. + REG(kX86RegTypeSeg, 2, 2), // CS. + REG(kX86RegTypeSeg, 3, 2), // SS. + REG(kX86RegTypeSeg, 4, 2), // DS. + REG(kX86RegTypeSeg, 5, 2), // FS. + REG(kX86RegTypeSeg, 6, 2) // GS. + }, + + // NoGp. + REG(kInvalidReg, kInvalidReg, 0), + // RIP. + REG(kX86RegTypeRip, 0, 0), }; #undef REG diff --git a/src/asmjit/x86/x86scheduler.cpp b/src/asmjit/x86/x86scheduler.cpp deleted file mode 100644 index fb93e87..0000000 --- a/src/asmjit/x86/x86scheduler.cpp +++ /dev/null @@ -1,94 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. - -// [Export] -#define ASMJIT_EXPORTS - -// [Guard] -#include "../build.h" -#if !defined(ASMJIT_DISABLE_COMPILER) && (defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64)) - -// [Dependencies - AsmJit] -#include "../base/containers.h" -#include "../x86/x86scheduler_p.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [Internals] -// ============================================================================ - -//! \internal -struct X86ScheduleData { - //! Registers read by the instruction. - X86RegMask regsIn; - //! Registers written by the instruction. - X86RegMask regsOut; - - //! Flags read by the instruction. - uint8_t flagsIn; - //! Flags written by the instruction. 
- uint8_t flagsOut; - - //! How many `uops` or `cycles` the instruction takes. - uint8_t ops; - //! Instruction latency. - uint8_t latency; - - //! Which ports the instruction can run at. - uint16_t ports; - //! \internal - uint16_t reserved; - - //! All instructions that this instruction depends on. - PodList::Link* dependsOn; - //! All instructions that use the result of this instruction. - PodList::Link* usedBy; -}; - -// ============================================================================ -// [asmjit::X86Scheduler - Construction / Destruction] -// ============================================================================ - -X86Scheduler::X86Scheduler(X86Compiler* compiler, const X86CpuInfo* cpuInfo) : - _compiler(compiler), - _cpuInfo(cpuInfo) {} -X86Scheduler::~X86Scheduler() {} - -// ============================================================================ -// [asmjit::X86Scheduler - Run] -// ============================================================================ - -Error X86Scheduler::run(HLNode* start, HLNode* stop) { - /* - ASMJIT_TLOG("[Schedule] === Begin ==="); - - Zone zone(8096 - Zone::kZoneOverhead); - HLNode* node_ = start; - - while (node_ != stop) { - HLNode* next = node_->getNext(); - ASMJIT_ASSERT(node_->getType() == kHLNodeTypeInst); - - printf(" %s\n", X86Util::getInstInfo(static_cast(node_)->getInstId()).getInstName()); - node_ = next; - } - - ASMJIT_TLOG("[Schedule] === End ==="); - */ - return kErrorOk; -} - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_COMPILER && (ASMJIT_BUILD_X86 || ASMJIT_BUILD_X64) diff --git a/src/asmjit/x86/x86scheduler_p.h b/src/asmjit/x86/x86scheduler_p.h deleted file mode 100644 index f5b4930..0000000 --- a/src/asmjit/x86/x86scheduler_p.h +++ /dev/null @@ -1,63 +0,0 @@ -// [AsmJit] -// Complete x86/x64 JIT and Remote Assembler for C++. -// -// [License] -// Zlib - See LICENSE.md file in the package. 
- -// [Guard] -#ifndef _ASMJIT_X86_X86SCHEDULER_P_H -#define _ASMJIT_X86_X86SCHEDULER_P_H - -#include "../build.h" -#if !defined(ASMJIT_DISABLE_COMPILER) - -// [Dependencies - AsmJit] -#include "../x86/x86compiler.h" -#include "../x86/x86compilercontext_p.h" -#include "../x86/x86cpuinfo.h" -#include "../x86/x86inst.h" - -// [Api-Begin] -#include "../apibegin.h" - -namespace asmjit { - -// ============================================================================ -// [asmjit::X86Scheduler] -// ============================================================================ - -//! \internal -//! -//! X86 scheduler. -struct X86Scheduler { - // -------------------------------------------------------------------------- - // [Construction / Destruction] - // -------------------------------------------------------------------------- - - X86Scheduler(X86Compiler* compiler, const X86CpuInfo* cpuInfo); - ~X86Scheduler(); - - // -------------------------------------------------------------------------- - // [Run] - // -------------------------------------------------------------------------- - - Error run(HLNode* start, HLNode* stop); - - // -------------------------------------------------------------------------- - // [Members] - // -------------------------------------------------------------------------- - - //! Attached compiler. - X86Compiler* _compiler; - //! CPU information used for scheduling. 
- const X86CpuInfo* _cpuInfo; -}; - -} // asmjit namespace - -// [Api-End] -#include "../apiend.h" - -// [Guard] -#endif // !ASMJIT_DISABLE_COMPILER -#endif // _ASMJIT_X86_X86SCHEDULER_P_H diff --git a/src/test/asmjit_bench_x86.cpp b/src/test/asmjit_bench_x86.cpp index 5e7cd18..00f8260 100644 --- a/src/test/asmjit_bench_x86.cpp +++ b/src/test/asmjit_bench_x86.cpp @@ -16,6 +16,68 @@ #include #include +// ============================================================================ +// [Configuration] +// ============================================================================ + +static const uint32_t kNumRepeats = 10; +static const uint32_t kNumIterations = 5000; + +// ============================================================================ +// [TestRuntime] +// ============================================================================ + +struct TestRuntime : public asmjit::Runtime { + ASMJIT_NO_COPY(TestRuntime) + + // -------------------------------------------------------------------------- + // [Construction / Destruction] + // -------------------------------------------------------------------------- + + TestRuntime(uint32_t arch, uint32_t callConv) ASMJIT_NOEXCEPT { + _cpuInfo.setArch(arch); + _stackAlignment = 16; + _baseAddress = 0; + _cdeclConv = static_cast(callConv); + _stdCallConv = static_cast(callConv); + } + virtual ~TestRuntime() ASMJIT_NOEXCEPT {} + + // -------------------------------------------------------------------------- + // [Interface] + // -------------------------------------------------------------------------- + + virtual asmjit::Error add(void** dst, asmjit::Assembler* assembler) ASMJIT_NOEXCEPT { + size_t codeSize = assembler->getCodeSize(); + if (codeSize == 0) { + *dst = NULL; + return asmjit::kErrorNoCodeGenerated; + } + + void* p = ::malloc(codeSize); + if (p == NULL) { + *dst = NULL; + return asmjit::kErrorNoHeapMemory; + } + + size_t relocSize = assembler->relocCode(p, _baseAddress); + if (relocSize == 0) { + ::free(p); 
+ *dst = NULL; + return asmjit::kErrorInvalidState; + } + + *dst = p; + return asmjit::kErrorOk; + } + + virtual asmjit::Error release(void* p) ASMJIT_NOEXCEPT { + ::free(p); + return asmjit::kErrorOk; + } +}; + + // ============================================================================ // [Performance] // ============================================================================ @@ -49,34 +111,39 @@ struct Performance { uint32_t best; }; +static double mbps(uint32_t time, size_t outputSize) { + double bytesTotal = static_cast(outputSize); + return (bytesTotal * 1000) / (static_cast(time) * 1024 * 1024); +} + // ============================================================================ // [Main] // ============================================================================ -static uint32_t instPerMs(uint32_t time, uint32_t numIterations, uint32_t instPerIteration) { - return static_cast( - static_cast(numIterations) * instPerIteration * 1000 / time); -} - -int main(int argc, char* argv[]) { +#if defined(ASMJIT_BUILD_X86) || defined(ASMJIT_BUILD_X64) +static void benchX86(uint32_t arch, uint32_t callConv) { using namespace asmjit; Performance perf; - uint32_t kNumRepeats = 10; - uint32_t kNumIterations = 10000; + TestRuntime runtime(arch, callConv); - JitRuntime runtime; - X86Assembler a(&runtime); + X86Assembler a(&runtime, arch); X86Compiler c; uint32_t r, i; + const char* archName = arch == kArchX86 ? 
"X86" : "X64"; + // -------------------------------------------------------------------------- // [Bench - Opcode] // -------------------------------------------------------------------------- + size_t asmOutputSize = 0; + size_t cmpOutputSize = 0; + perf.reset(); for (r = 0; r < kNumRepeats; r++) { + asmOutputSize = 0; perf.start(); for (i = 0; i < kNumIterations; i++) { asmgen::opcode(a); @@ -84,14 +151,14 @@ int main(int argc, char* argv[]) { void *p = a.make(); runtime.release(p); + asmOutputSize += a.getCodeSize(); a.reset(); } perf.end(); } - printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n", - "Assembler [GenOpCode]", - perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenOpCodeInstCount)); + printf("%-12s (%s) | Time: %-6u [ms] | Speed: %7.3f [MB/s]\n", + "X86Assembler", archName, perf.best, mbps(perf.best, asmOutputSize)); // -------------------------------------------------------------------------- // [Bench - Blend] @@ -99,6 +166,7 @@ int main(int argc, char* argv[]) { perf.reset(); for (r = 0; r < kNumRepeats; r++) { + cmpOutputSize = 0; perf.start(); for (i = 0; i < kNumIterations; i++) { c.attach(&a); @@ -108,14 +176,24 @@ int main(int argc, char* argv[]) { void* p = a.make(); runtime.release(p); + cmpOutputSize += a.getCodeSize(); a.reset(); } perf.end(); } - printf("%-22s | Time: %-6u [ms] | Speed: %-9u [inst/s]\n", - "Compiler [GenBlend]", - perf.best, instPerMs(perf.best, kNumIterations, asmgen::kGenBlendInstCount)); + printf("%-12s (%s) | Time: %-6u [ms] | Speed: %7.3f [MB/s]\n", + "X86Compiler", archName, perf.best, mbps(perf.best, cmpOutputSize)); +} +#endif + +int main(int argc, char* argv[]) { +#if defined(ASMJIT_BUILD_X86) + benchX86(asmjit::kArchX86, asmjit::kCallConvX86CDecl); +#endif +#if defined(ASMJIT_BUILD_X64) + benchX86(asmjit::kArchX64, asmjit::kCallConvX64Unix); +#endif return 0; } diff --git a/src/test/asmjit_test_opcode.cpp b/src/test/asmjit_test_opcode.cpp index 2cdd3ce..dc2479a 100644 --- 
a/src/test/asmjit_test_opcode.cpp +++ b/src/test/asmjit_test_opcode.cpp @@ -29,17 +29,18 @@ struct OpcodeDumpInfo { static const char* archIdToString(uint32_t archId) { switch (archId) { - case asmjit::kArchNone: return "None"; - case asmjit::kArchX86: return "X86"; - case asmjit::kArchX64: return "X64"; - case asmjit::kArchArm: return "ARM"; + case asmjit::kArchNone : return "None"; + case asmjit::kArchX86 : return "X86"; + case asmjit::kArchX64 : return "X64"; + case asmjit::kArchArm32: return "ARM32"; + case asmjit::kArchArm64: return "ARM64"; default: return ""; } } int main(int argc, char* argv[]) { asmjit::FileLogger logger(stdout); - logger.setOption(asmjit::kLoggerOptionBinaryForm, true); + logger.addOptions(asmjit::Logger::kOptionBinaryForm); OpcodeDumpInfo infoList[] = { # if defined(ASMJIT_BUILD_X86) diff --git a/src/test/asmjit_test_opcode.h b/src/test/asmjit_test_opcode.h index 066c14a..227bf52 100644 --- a/src/test/asmjit_test_opcode.h +++ b/src/test/asmjit_test_opcode.h @@ -13,8 +13,6 @@ namespace asmgen { -enum { kGenOpCodeInstCount = 2690 }; - // Generate all instructions asmjit can emit. 
static void opcode(asmjit::X86Assembler& a, bool useRex1 = false, bool useRex2 = false) { using namespace asmjit; diff --git a/src/test/asmjit_test_unit.cpp b/src/test/asmjit_test_unit.cpp index af1db85..b84ab32 100644 --- a/src/test/asmjit_test_unit.cpp +++ b/src/test/asmjit_test_unit.cpp @@ -6,8 +6,6 @@ // [Dependencies - AsmJit] #include "../asmjit/asmjit.h" -#include "../asmjit/base/compilercontext_p.h" -#include "../asmjit/x86/x86compilercontext_p.h" // ============================================================================ // [DumpCpu] @@ -18,101 +16,129 @@ struct DumpCpuFeature { const char* name; }; -static void dumpCpuFeatures(const asmjit::CpuInfo* cpuInfo, const DumpCpuFeature* data, size_t count) { +static void dumpCpuFeatures(const asmjit::CpuInfo& cpu, const DumpCpuFeature* data, size_t count) { for (size_t i = 0; i < count; i++) - if (cpuInfo->hasFeature(data[i].feature)) + if (cpu.hasFeature(data[i].feature)) INFO(" %s", data[i].name); } static void dumpCpu(void) { - const asmjit::CpuInfo* cpu = asmjit::CpuInfo::getHost(); + const asmjit::CpuInfo& cpu = asmjit::CpuInfo::getHost(); - INFO("Host CPU Info:"); - INFO(" Vendor string : %s", cpu->getVendorString()); - INFO(" Brand string : %s", cpu->getBrandString()); - INFO(" Family : %u", cpu->getFamily()); - INFO(" Model : %u", cpu->getModel()); - INFO(" Stepping : %u", cpu->getStepping()); - INFO(" HW-Threads Count : %u", cpu->getHwThreadsCount()); + INFO("Host CPU:"); + INFO(" Vendor string : %s", cpu.getVendorString()); + INFO(" Brand string : %s", cpu.getBrandString()); + INFO(" Family : %u", cpu.getFamily()); + INFO(" Model : %u", cpu.getModel()); + INFO(" Stepping : %u", cpu.getStepping()); + INFO(" HW-Threads Count : %u", cpu.getHwThreadsCount()); INFO(""); // -------------------------------------------------------------------------- - // [X86] + // [ARM / ARM64] + // -------------------------------------------------------------------------- + +#if ASMJIT_ARCH_ARM32 || ASMJIT_ARCH_ARM64 + 
static const DumpCpuFeature armFeaturesList[] = { + { asmjit::CpuInfo::kArmFeatureV6 , "ARMv6" }, + { asmjit::CpuInfo::kArmFeatureV7 , "ARMv7" }, + { asmjit::CpuInfo::kArmFeatureV8 , "ARMv8" }, + { asmjit::CpuInfo::kArmFeatureTHUMB , "THUMB" }, + { asmjit::CpuInfo::kArmFeatureTHUMB2 , "THUMBv2" }, + { asmjit::CpuInfo::kArmFeatureVFP2 , "VFPv2" }, + { asmjit::CpuInfo::kArmFeatureVFP3 , "VFPv3" }, + { asmjit::CpuInfo::kArmFeatureVFP4 , "VFPv4" }, + { asmjit::CpuInfo::kArmFeatureVFP_D32 , "VFP D32" }, + { asmjit::CpuInfo::kArmFeatureNEON , "NEON" }, + { asmjit::CpuInfo::kArmFeatureDSP , "DSP" }, + { asmjit::CpuInfo::kArmFeatureIDIV , "IDIV" }, + { asmjit::CpuInfo::kArmFeatureAES , "AES" }, + { asmjit::CpuInfo::kArmFeatureCRC32 , "CRC32" }, + { asmjit::CpuInfo::kArmFeatureSHA1 , "SHA1" }, + { asmjit::CpuInfo::kArmFeatureSHA256 , "SHA256" }, + { asmjit::CpuInfo::kArmFeatureAtomics64 , "64-bit atomics" } + }; + + INFO("ARM Features:"); + dumpCpuFeatures(cpu, armFeaturesList, ASMJIT_ARRAY_SIZE(armFeaturesList)); + INFO(""); +#endif + + // -------------------------------------------------------------------------- + // [X86 / X64] // -------------------------------------------------------------------------- #if ASMJIT_ARCH_X86 || ASMJIT_ARCH_X64 - const asmjit::X86CpuInfo* x86Cpu = static_cast(cpu); - static const DumpCpuFeature x86FeaturesList[] = { - { asmjit::kX86CpuFeatureNX , "NX (Non-Execute Bit)" }, - { asmjit::kX86CpuFeatureMT , "MT (Multi-Threading)" }, - { asmjit::kX86CpuFeatureRDTSC , "RDTSC" }, - { asmjit::kX86CpuFeatureRDTSCP , "RDTSCP" }, - { asmjit::kX86CpuFeatureCMOV , "CMOV" }, - { asmjit::kX86CpuFeatureCMPXCHG8B , "CMPXCHG8B" }, - { asmjit::kX86CpuFeatureCMPXCHG16B , "CMPXCHG16B" }, - { asmjit::kX86CpuFeatureCLFLUSH , "CLFLUSH" }, - { asmjit::kX86CpuFeatureCLFLUSH_OPT , "CLFLUSH (Opt)" }, - { asmjit::kX86CpuFeaturePREFETCH , "PREFETCH" }, - { asmjit::kX86CpuFeaturePREFETCHWT1 , "PREFETCHWT1" }, - { asmjit::kX86CpuFeatureLahfSahf , "LAHF/SAHF" }, - { 
asmjit::kX86CpuFeatureFXSR , "FXSR" }, - { asmjit::kX86CpuFeatureFXSR_OPT , "FXSR (Opt)" }, - { asmjit::kX86CpuFeatureMMX , "MMX" }, - { asmjit::kX86CpuFeatureMMX2 , "MMX2" }, - { asmjit::kX86CpuFeature3DNOW , "3DNOW" }, - { asmjit::kX86CpuFeature3DNOW2 , "3DNOW2" }, - { asmjit::kX86CpuFeatureSSE , "SSE" }, - { asmjit::kX86CpuFeatureSSE2 , "SSE2" }, - { asmjit::kX86CpuFeatureSSE3 , "SSE3" }, - { asmjit::kX86CpuFeatureSSSE3 , "SSSE3" }, - { asmjit::kX86CpuFeatureSSE4A , "SSE4A" }, - { asmjit::kX86CpuFeatureSSE4_1 , "SSE4.1" }, - { asmjit::kX86CpuFeatureSSE4_2 , "SSE4.2" }, - { asmjit::kX86CpuFeatureMSSE , "Misaligned SSE" }, - { asmjit::kX86CpuFeatureMONITOR , "MONITOR/MWAIT" }, - { asmjit::kX86CpuFeatureMOVBE , "MOVBE" }, - { asmjit::kX86CpuFeaturePOPCNT , "POPCNT" }, - { asmjit::kX86CpuFeatureLZCNT , "LZCNT" }, - { asmjit::kX86CpuFeatureAESNI , "AESNI" }, - { asmjit::kX86CpuFeaturePCLMULQDQ , "PCLMULQDQ" }, - { asmjit::kX86CpuFeatureRDRAND , "RDRAND" }, - { asmjit::kX86CpuFeatureRDSEED , "RDSEED" }, - { asmjit::kX86CpuFeatureSHA , "SHA" }, - { asmjit::kX86CpuFeatureXSAVE , "XSAVE" }, - { asmjit::kX86CpuFeatureXSAVE_OS , "XSAVE (OS)" }, - { asmjit::kX86CpuFeatureAVX , "AVX" }, - { asmjit::kX86CpuFeatureAVX2 , "AVX2" }, - { asmjit::kX86CpuFeatureF16C , "F16C" }, - { asmjit::kX86CpuFeatureFMA3 , "FMA3" }, - { asmjit::kX86CpuFeatureFMA4 , "FMA4" }, - { asmjit::kX86CpuFeatureXOP , "XOP" }, - { asmjit::kX86CpuFeatureBMI , "BMI" }, - { asmjit::kX86CpuFeatureBMI2 , "BMI2" }, - { asmjit::kX86CpuFeatureHLE , "HLE" }, - { asmjit::kX86CpuFeatureRTM , "RTM" }, - { asmjit::kX86CpuFeatureADX , "ADX" }, - { asmjit::kX86CpuFeatureMPX , "MPX" }, - { asmjit::kX86CpuFeatureFSGSBASE , "FS/GS Base" }, - { asmjit::kX86CpuFeatureMOVSBSTOSB_OPT, "REP MOVSB/STOSB (Opt)" }, - { asmjit::kX86CpuFeatureAVX512F , "AVX512F" }, - { asmjit::kX86CpuFeatureAVX512CD , "AVX512CD" }, - { asmjit::kX86CpuFeatureAVX512PF , "AVX512PF" }, - { asmjit::kX86CpuFeatureAVX512ER , "AVX512ER" }, - { 
asmjit::kX86CpuFeatureAVX512DQ , "AVX512DQ" }, - { asmjit::kX86CpuFeatureAVX512BW , "AVX512BW" }, - { asmjit::kX86CpuFeatureAVX512VL , "AVX512VL" } + { asmjit::CpuInfo::kX86FeatureNX , "NX (Non-Execute Bit)" }, + { asmjit::CpuInfo::kX86FeatureMT , "MT (Multi-Threading)" }, + { asmjit::CpuInfo::kX86FeatureRDTSC , "RDTSC" }, + { asmjit::CpuInfo::kX86FeatureRDTSCP , "RDTSCP" }, + { asmjit::CpuInfo::kX86FeatureCMOV , "CMOV" }, + { asmjit::CpuInfo::kX86FeatureCMPXCHG8B , "CMPXCHG8B" }, + { asmjit::CpuInfo::kX86FeatureCMPXCHG16B , "CMPXCHG16B" }, + { asmjit::CpuInfo::kX86FeatureCLFLUSH , "CLFLUSH" }, + { asmjit::CpuInfo::kX86FeatureCLFLUSH_OPT , "CLFLUSH (Opt)" }, + { asmjit::CpuInfo::kX86FeaturePREFETCH , "PREFETCH" }, + { asmjit::CpuInfo::kX86FeaturePREFETCHWT1 , "PREFETCHWT1" }, + { asmjit::CpuInfo::kX86FeatureLAHF_SAHF , "LAHF/SAHF" }, + { asmjit::CpuInfo::kX86FeatureFXSR , "FXSR" }, + { asmjit::CpuInfo::kX86FeatureFXSR_OPT , "FXSR (Opt)" }, + { asmjit::CpuInfo::kX86FeatureMMX , "MMX" }, + { asmjit::CpuInfo::kX86FeatureMMX2 , "MMX2" }, + { asmjit::CpuInfo::kX86Feature3DNOW , "3DNOW" }, + { asmjit::CpuInfo::kX86Feature3DNOW2 , "3DNOW2" }, + { asmjit::CpuInfo::kX86FeatureSSE , "SSE" }, + { asmjit::CpuInfo::kX86FeatureSSE2 , "SSE2" }, + { asmjit::CpuInfo::kX86FeatureSSE3 , "SSE3" }, + { asmjit::CpuInfo::kX86FeatureSSSE3 , "SSSE3" }, + { asmjit::CpuInfo::kX86FeatureSSE4A , "SSE4A" }, + { asmjit::CpuInfo::kX86FeatureSSE4_1 , "SSE4.1" }, + { asmjit::CpuInfo::kX86FeatureSSE4_2 , "SSE4.2" }, + { asmjit::CpuInfo::kX86FeatureMSSE , "Misaligned SSE" }, + { asmjit::CpuInfo::kX86FeatureMONITOR , "MONITOR/MWAIT" }, + { asmjit::CpuInfo::kX86FeatureMOVBE , "MOVBE" }, + { asmjit::CpuInfo::kX86FeaturePOPCNT , "POPCNT" }, + { asmjit::CpuInfo::kX86FeatureLZCNT , "LZCNT" }, + { asmjit::CpuInfo::kX86FeatureAESNI , "AESNI" }, + { asmjit::CpuInfo::kX86FeaturePCLMULQDQ , "PCLMULQDQ" }, + { asmjit::CpuInfo::kX86FeatureRDRAND , "RDRAND" }, + { asmjit::CpuInfo::kX86FeatureRDSEED , "RDSEED" }, + 
{ asmjit::CpuInfo::kX86FeatureSHA , "SHA" }, + { asmjit::CpuInfo::kX86FeatureXSAVE , "XSAVE" }, + { asmjit::CpuInfo::kX86FeatureXSAVE_OS , "XSAVE (OS)" }, + { asmjit::CpuInfo::kX86FeatureAVX , "AVX" }, + { asmjit::CpuInfo::kX86FeatureAVX2 , "AVX2" }, + { asmjit::CpuInfo::kX86FeatureF16C , "F16C" }, + { asmjit::CpuInfo::kX86FeatureFMA3 , "FMA3" }, + { asmjit::CpuInfo::kX86FeatureFMA4 , "FMA4" }, + { asmjit::CpuInfo::kX86FeatureXOP , "XOP" }, + { asmjit::CpuInfo::kX86FeatureBMI , "BMI" }, + { asmjit::CpuInfo::kX86FeatureBMI2 , "BMI2" }, + { asmjit::CpuInfo::kX86FeatureHLE , "HLE" }, + { asmjit::CpuInfo::kX86FeatureRTM , "RTM" }, + { asmjit::CpuInfo::kX86FeatureADX , "ADX" }, + { asmjit::CpuInfo::kX86FeatureMPX , "MPX" }, + { asmjit::CpuInfo::kX86FeatureFSGSBASE , "FS/GS Base" }, + { asmjit::CpuInfo::kX86FeatureMOVSBSTOSB_OPT, "REP MOVSB/STOSB (Opt)" }, + { asmjit::CpuInfo::kX86FeatureAVX512F , "AVX512F" }, + { asmjit::CpuInfo::kX86FeatureAVX512CD , "AVX512CD" }, + { asmjit::CpuInfo::kX86FeatureAVX512PF , "AVX512PF" }, + { asmjit::CpuInfo::kX86FeatureAVX512ER , "AVX512ER" }, + { asmjit::CpuInfo::kX86FeatureAVX512DQ , "AVX512DQ" }, + { asmjit::CpuInfo::kX86FeatureAVX512BW , "AVX512BW" }, + { asmjit::CpuInfo::kX86FeatureAVX512VL , "AVX512VL" } }; - INFO("Host CPU Info (X86/X64):"); - INFO(" Processor Type : %u", x86Cpu->getProcessorType()); - INFO(" Brand Index : %u", x86Cpu->getBrandIndex()); - INFO(" CL Flush Cache Line : %u", x86Cpu->getFlushCacheLineSize()); - INFO(" Max logical Processors : %u", x86Cpu->getMaxLogicalProcessors()); + INFO("X86 Specific:"); + INFO(" Processor Type : %u", cpu.getX86ProcessorType()); + INFO(" Brand Index : %u", cpu.getX86BrandIndex()); + INFO(" CL Flush Cache Line : %u", cpu.getX86FlushCacheLineSize()); + INFO(" Max logical Processors : %u", cpu.getX86MaxLogicalProcessors()); INFO(""); - INFO("Host CPU Features (X86/X64):"); - dumpCpuFeatures(x86Cpu, x86FeaturesList, ASMJIT_ARRAY_SIZE(x86FeaturesList)); + INFO("X86 Features:"); + 
dumpCpuFeatures(cpu, x86FeaturesList, ASMJIT_ARRAY_SIZE(x86FeaturesList)); INFO(""); #endif } @@ -137,14 +163,17 @@ static void dumpSizeOf(void) { DUMP_TYPE(float); DUMP_TYPE(double); DUMP_TYPE(void*); + DUMP_TYPE(asmjit::Ptr); + DUMP_TYPE(asmjit::SignedPtr); INFO(""); INFO("SizeOf Base:"); + DUMP_TYPE(asmjit::Assembler); DUMP_TYPE(asmjit::ConstPool); + DUMP_TYPE(asmjit::LabelData); + DUMP_TYPE(asmjit::RelocData); DUMP_TYPE(asmjit::Runtime); DUMP_TYPE(asmjit::Zone); - DUMP_TYPE(asmjit::Ptr); - DUMP_TYPE(asmjit::SignedPtr); INFO(""); INFO("SizeOf Operand:"); @@ -156,19 +185,9 @@ static void dumpSizeOf(void) { DUMP_TYPE(asmjit::Label); INFO(""); - INFO("SizeOf Assembler:"); - DUMP_TYPE(asmjit::Assembler); - DUMP_TYPE(asmjit::LabelData); - DUMP_TYPE(asmjit::RelocData); - INFO(""); - #if !defined(ASMJIT_DISABLE_COMPILER) INFO("SizeOf Compiler:"); DUMP_TYPE(asmjit::Compiler); - DUMP_TYPE(asmjit::VarMap); - DUMP_TYPE(asmjit::VarAttr); - DUMP_TYPE(asmjit::VarData); - DUMP_TYPE(asmjit::VarState); DUMP_TYPE(asmjit::HLNode); DUMP_TYPE(asmjit::HLInst); DUMP_TYPE(asmjit::HLJump); @@ -197,9 +216,6 @@ static void dumpSizeOf(void) { #if !defined(ASMJIT_DISABLE_COMPILER) DUMP_TYPE(asmjit::X86Compiler); - DUMP_TYPE(asmjit::X86VarMap); - DUMP_TYPE(asmjit::X86VarInfo); - DUMP_TYPE(asmjit::X86VarState); DUMP_TYPE(asmjit::X86CallNode); DUMP_TYPE(asmjit::X86FuncNode); DUMP_TYPE(asmjit::X86FuncDecl); diff --git a/src/test/asmjit_test_x86.cpp b/src/test/asmjit_test_x86.cpp index 759ff75..5f8e1c9 100644 --- a/src/test/asmjit_test_x86.cpp +++ b/src/test/asmjit_test_x86.cpp @@ -189,9 +189,9 @@ struct X86Test_JumpCross : public X86Test { virtual void compile(X86Compiler& c) { c.addFunc(FuncBuilder0(kCallConvHost)); - Label L_1(c); - Label L_2(c); - Label L_3(c); + Label L_1 = c.newLabel(); + Label L_2 = c.newLabel(); + Label L_3 = c.newLabel(); c.jmp(L_2); @@ -230,13 +230,13 @@ struct X86Test_JumpUnreachable1 : public X86Test { virtual void compile(X86Compiler& c) { 
c.addFunc(FuncBuilder0(kCallConvHost)); - Label L_1(c); - Label L_2(c); - Label L_3(c); - Label L_4(c); - Label L_5(c); - Label L_6(c); - Label L_7(c); + Label L_1 = c.newLabel(); + Label L_2 = c.newLabel(); + Label L_3 = c.newLabel(); + Label L_4 = c.newLabel(); + Label L_5 = c.newLabel(); + Label L_6 = c.newLabel(); + Label L_7 = c.newLabel(); X86GpVar v0 = c.newUInt32("v0"); X86GpVar v1 = c.newUInt32("v1"); @@ -292,8 +292,8 @@ struct X86Test_JumpUnreachable2 : public X86Test { virtual void compile(X86Compiler& c) { c.addFunc(FuncBuilder0(kCallConvHost)); - Label L_1(c); - Label L_2(c); + Label L_1 = c.newLabel(); + Label L_2 = c.newLabel(); X86GpVar v0 = c.newUInt32("v0"); X86GpVar v1 = c.newUInt32("v1"); @@ -397,7 +397,7 @@ struct X86Test_AllocManual : public X86Test { c.spill(v0); c.spill(v1); - Label L(c); + Label L = c.newLabel(); c.mov(cnt, 32); c.bind(L); @@ -449,7 +449,7 @@ struct X86Test_AllocUseMem : public X86Test { X86GpVar aIdx = c.newInt32("aIdx"); X86GpVar aEnd = c.newInt32("aEnd"); - Label L_1(c); + Label L_1 = c.newLabel(); c.setArg(0, aIdx); c.setArg(1, aEnd); @@ -589,7 +589,7 @@ struct X86Test_AllocMany2 : public X86Test { } X86GpVar v0 = c.newInt32("v0"); - Label L(c); + Label L = c.newLabel(); c.mov(v0, 32); c.bind(L); @@ -1034,8 +1034,8 @@ struct X86Test_AllocIfElse1 : public X86Test { X86GpVar v1 = c.newInt32("v1"); X86GpVar v2 = c.newInt32("v2"); - Label L_1(c); - Label L_2(c); + Label L_1 = c.newLabel(); + Label L_2 = c.newLabel(); c.setArg(0, v1); c.setArg(1, v2); @@ -1085,10 +1085,10 @@ struct X86Test_AllocIfElse2 : public X86Test { X86GpVar v1 = c.newInt32("v1"); X86GpVar v2 = c.newInt32("v2"); - Label L_1(c); - Label L_2(c); - Label L_3(c); - Label L_4(c); + Label L_1 = c.newLabel(); + Label L_2 = c.newLabel(); + Label L_3 = c.newLabel(); + Label L_4 = c.newLabel(); c.setArg(0, v1); c.setArg(1, v2); @@ -1146,9 +1146,9 @@ struct X86Test_AllocIfElse3 : public X86Test { X86GpVar v2 = c.newInt32("v2"); X86GpVar counter = 
c.newInt32("counter"); - Label L_1(c); - Label L_Loop(c); - Label L_Exit(c); + Label L_1 = c.newLabel(); + Label L_Loop = c.newLabel(); + Label L_Exit = c.newLabel(); c.setArg(0, v1); c.setArg(1, v2); @@ -1206,10 +1206,10 @@ struct X86Test_AllocIfElse4 : public X86Test { X86GpVar v2 = c.newInt32("v2"); X86GpVar counter = c.newInt32("counter"); - Label L_1(c); - Label L_Loop1(c); - Label L_Loop2(c); - Label L_Exit(c); + Label L_1 = c.newLabel(); + Label L_Loop1 = c.newLabel(); + Label L_Loop2 = c.newLabel(); + Label L_Exit = c.newLabel(); c.mov(counter, 0); @@ -1556,8 +1556,8 @@ struct X86Test_AllocStack : public X86Test { X86GpVar a = c.newInt32("a"); X86GpVar b = c.newInt32("b"); - Label L_1(c); - Label L_2(c); + Label L_1 = c.newLabel(); + Label L_2 = c.newLabel(); // Fill stack by sequence [0, 1, 2, 3 ... 255]. c.xor_(i, i); @@ -1615,8 +1615,8 @@ struct X86Test_AllocMemcpy : public X86Test { X86GpVar src = c.newIntPtr("src"); X86GpVar cnt = c.newUIntPtr("cnt"); - Label L_Loop(c); // Create base labels we use - Label L_Exit(c); // in our function. + Label L_Loop = c.newLabel(); // Create base labels we use + Label L_Exit = c.newLabel(); // in our function. 
c.addFunc(FuncBuilder3(kCallConvHost)); c.setArg(0, dst); @@ -2247,8 +2247,8 @@ struct X86Test_CallConditional : public X86Test { c.setArg(1, y); c.setArg(2, op); - Label opAdd(c); - Label opMul(c); + Label opAdd = c.newLabel(); + Label opMul = c.newLabel(); c.cmp(op, 0); c.jz(opAdd); @@ -2392,7 +2392,7 @@ struct X86Test_CallRecursive : public X86Test { virtual void compile(X86Compiler& c) { X86GpVar val = c.newInt32("val"); - Label skip(c); + Label skip = c.newLabel(); X86FuncNode* func = c.addFunc(FuncBuilder1(kCallConvHost)); c.setArg(0, val); @@ -2443,7 +2443,7 @@ struct X86Test_CallMisc1 : public X86Test { virtual void compile(X86Compiler& c) { X86GpVar val = c.newInt32("val"); - Label skip(c); + Label skip = c.newLabel(); X86FuncNode* func = c.addFunc(FuncBuilder2(kCallConvHost)); @@ -2690,11 +2690,11 @@ struct X86Test_MiscMultiRet : public X86Test { X86GpVar a = c.newInt32("a"); X86GpVar b = c.newInt32("b"); - Label L_Zero(c); - Label L_Add(c); - Label L_Sub(c); - Label L_Mul(c); - Label L_Div(c); + Label L_Zero = c.newLabel(); + Label L_Add = c.newLabel(); + Label L_Sub = c.newLabel(); + Label L_Mul = c.newLabel(); + Label L_Div = c.newLabel(); c.setArg(0, op); c.setArg(1, a); @@ -2788,7 +2788,7 @@ struct X86Test_MiscUnfollow : public X86Test { X86GpVar a = c.newInt32("a"); X86GpVar b = c.newIntPtr("b"); - Label tramp(c); + Label tramp = c.newLabel(); c.setArg(0, a); c.setArg(1, b); @@ -2939,10 +2939,10 @@ int X86TestSuite::run() { FILE* file = stdout; FileLogger fileLogger(file); - fileLogger.setOption(kLoggerOptionBinaryForm, true); + fileLogger.addOptions(Logger::kOptionBinaryForm); StringLogger stringLogger; - stringLogger.setOption(kLoggerOptionBinaryForm, true); + stringLogger.addOptions(Logger::kOptionBinaryForm); for (i = 0; i < count; i++) { JitRuntime runtime; diff --git a/src/test/genblend.h b/src/test/genblend.h index 5e18c43..fbd7bbd 100644 --- a/src/test/genblend.h +++ b/src/test/genblend.h @@ -13,8 +13,6 @@ namespace asmgen { -enum { 
kGenBlendInstCount = 65 }; - // Generate a typical alpha blend function using SSE2 instruction set. Used // for benchmarking and also in test86. The generated code should be stable // and fully functional. @@ -29,25 +27,25 @@ static void blend(asmjit::X86Compiler& c) { X86GpVar j = c.newIntPtr("j"); X86GpVar t = c.newIntPtr("t"); - X86XmmVar cZero = c.newXmm("cZero"); - X86XmmVar cMul255A = c.newXmm("cMul255A"); - X86XmmVar cMul255M = c.newXmm("cMul255M"); - X86XmmVar x0 = c.newXmm("x0"); X86XmmVar x1 = c.newXmm("x1"); X86XmmVar y0 = c.newXmm("y0"); X86XmmVar a0 = c.newXmm("a0"); X86XmmVar a1 = c.newXmm("a1"); - Label L_SmallLoop(c); - Label L_SmallEnd(c); + X86XmmVar cZero = c.newXmm("cZero"); + X86XmmVar cMul255A = c.newXmm("cMul255A"); + X86XmmVar cMul255M = c.newXmm("cMul255M"); - Label L_LargeLoop(c); - Label L_LargeEnd(c); + Label L_SmallLoop = c.newLabel(); + Label L_SmallEnd = c.newLabel(); - Label L_Data(c); + Label L_LargeLoop = c.newLabel(); + Label L_LargeEnd = c.newLabel(); - c.addFunc(FuncBuilder3(kCallConvHost)); + Label L_Data = c.newLabel(); + + c.addFunc(FuncBuilder3(c.getRuntime()->getCdeclConv())); c.setArg(0, dst); c.setArg(1, src); @@ -170,8 +168,8 @@ static void blend(asmjit::X86Compiler& c) { // Data. c.align(kAlignData, 16); c.bind(L_Data); - c.dxmm(Vec128::fromSw(0x0080)); - c.dxmm(Vec128::fromSw(0x0101)); + c.dxmm(Vec128::fromSW(0x0080)); + c.dxmm(Vec128::fromSW(0x0101)); } } // asmgen namespace diff --git a/tools/configure-unix-makefiles-dbg.sh b/tools/configure-unix-makefiles-dbg.sh index 84ca80a..f127cf2 100644 --- a/tools/configure-unix-makefiles-dbg.sh +++ b/tools/configure-unix-makefiles-dbg.sh @@ -5,5 +5,5 @@ ASMJIT_BUILD_DIR="build_makefiles_dbg" mkdir ../${ASMJIT_BUILD_DIR} cd ../${ASMJIT_BUILD_DIR} -cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug -DASMJIT_BUILD_TEST=1 -DASMJIT_BUILD_SAMPLES=1 +cmake .. -G"Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug -DASMJIT_BUILD_TEST=1 cd ${ASMJIT_CURRENT_DIR}