[ABI] Refactored AsmJit to use strongly typed enums; this breaks both the API and the ABI

[ABI] Added ABI version as an inline namespace, which forms asmjit::_abi_MAJOR_MINOR
[ABI] Added support for AVX512_FP16, 16-bit broadcast, and AVX512_FP16 tests
[ABI] Added initial support for consecutive registers to the instruction database and register allocator
[ABI] Added the ability to use temporary memory in CodeHolder's zone
[ABI] Compiler::setArg() is now deprecated; use FuncNode::setArg() instead
[Bug] Fixed RW information of instructions that only support implicit zeroing with {k}
[Bug] Fixed broadcast handling so that bcst16 operands can be broadcast
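
The headline change is the move to strongly typed (scoped) enums: call sites now spell out enumerators such as asmjit::OperandType::kReg (visible in the .natvis diff below), and flag-like enums get their bitwise operators from the new ASMJIT_DEFINE_ENUM_FLAGS macro added to core/api-config.h further down. Below is a minimal standalone C++ sketch of that pattern; CpuHints is a hypothetical flag type used only for illustration, not an actual AsmJit enum.

#include <cstdint>

// A scoped enum does not convert to its underlying integer implicitly, so
// bitwise operators have to be defined explicitly -- this mirrors what
// ASMJIT_DEFINE_ENUM_FLAGS expands to for AsmJit's own flag enums.
enum class CpuHints : std::uint32_t {
  kNone   = 0x00u,
  kAvx512 = 0x01u,
  kFp16   = 0x02u
};

static constexpr CpuHints operator|(CpuHints a, CpuHints b) noexcept {
  return CpuHints(std::uint32_t(a) | std::uint32_t(b));
}

static constexpr CpuHints operator&(CpuHints a, CpuHints b) noexcept {
  return CpuHints(std::uint32_t(a) & std::uint32_t(b));
}

static constexpr bool hasHint(CpuHints value, CpuHints hint) noexcept {
  return (value & hint) != CpuHints::kNone;
}

int main() {
  CpuHints hints = CpuHints::kAvx512 | CpuHints::kFp16;
  return hasHint(hints, CpuHints::kFp16) ? 0 : 1;
}
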
kobalicek
2021-12-13 09:11:58 +01:00
parent 4ec760a3d1
commit 996deae327
154 changed files with 29537 additions and 21190 deletions


@@ -19,6 +19,10 @@
"cmd": ["asmjit_test_assembler", "--quiet"],
"optional": true
},
{
"cmd": ["asmjit_test_assembler", "--quiet", "--validate"],
"optional": true
},
{
"cmd": ["asmjit_test_emitters"],
"optional": true


@@ -31,58 +31,59 @@ jobs:
fail-fast: false
matrix:
include:
- { title: "linux-lib" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", problem_matcher: "cpp" }
- { title: "windows-lib" , os: "windows-latest", cc: "vs2019" , arch: "x86", build_type: "Debug" , problem_matcher: "cpp" }
- { title: "linux-lib" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", problem_matcher: "cpp" }
- { title: "windows-lib" , os: "windows-2022" , cc: "vs2022" , arch: "x86", build_type: "Debug" , problem_matcher: "cpp" }
- { title: "diag-asan" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "address" }
- { title: "diag-ubsan" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "undefined" }
- { title: "diag-valgrind" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "valgrind" }
- { title: "diag-asan" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "address" }
- { title: "diag-ubsan" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "undefined" }
- { title: "diag-valgrind" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON", diagnostics: "valgrind" }
- { title: "diag-scan-build", os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON", diagnostics: "scan-build" }
- { title: "no-deprecated" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_DEPRECATED=1" }
- { title: "no-intrinsics" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_INTRINSICS=1" }
- { title: "no-logging" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_LOGGING=1" }
- { title: "no-builder" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_BUILDER=1" }
- { title: "no-compiler" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_COMPILER=1" }
- { title: "no-deprecated" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_DEPRECATED=1" }
- { title: "no-intrinsics" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_INTRINSICS=1" }
- { title: "no-logging" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_LOGGING=1" }
- { title: "no-builder" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_BUILDER=1" }
- { title: "no-compiler" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON,ASMJIT_NO_COMPILER=1" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-5" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-5" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-6" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-6" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-7" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-7" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-8" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-8" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-9" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "gcc" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-4.8" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-5" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-5" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-6" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-18.04" , cc: "gcc-6" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-7" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-7" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-8" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-8" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-9" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "gcc-10" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-latest" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-9" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "linux" , os: "ubuntu-20.04" , cc: "clang-10", arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "gcc-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "gcc-9" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "gcc-9" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "gcc-9" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "macos-10.15" , os: "macos-10.15" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
# Disabled, because of GitHub actions infrastructure issues (builds not starting).
#- { title: "macos-11.0" , os: "macos-11.0" , cc: "gcc-10" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
@@ -90,10 +91,15 @@ jobs:
#- { title: "macos-11.0" , os: "macos-11.0" , cc: "clang" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
#- { title: "macos-11.0" , os: "macos-11.0" , cc: "clang" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-latest", cc: "vs2019" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2019" , cc: "vs2019" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2019" , cc: "vs2019" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2019" , cc: "vs2019" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2019" , cc: "vs2019" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2022" , cc: "vs2022" , arch: "x86", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2022" , cc: "vs2022" , arch: "x86", build_type: "Release", defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2022" , cc: "vs2022" , arch: "x64", build_type: "Debug" , defs: "ASMJIT_TEST=ON" }
- { title: "windows" , os: "windows-2022" , cc: "vs2022" , arch: "x64", build_type: "Release", defs: "ASMJIT_TEST=ON" }
name: "${{matrix.title}} (${{matrix.cc}}, ${{matrix.arch}}, ${{matrix.build_type}})"
runs-on: "${{matrix.os}}"


@@ -19,9 +19,8 @@ endif()
include(CheckCXXCompilerFlag)
include(GNUInstallDirs)
# =============================================================================
# [AsmJit - Deprecated]
# =============================================================================
# AsmJit - Deprecated
# ===================
if (DEFINED ASMJIT_BUILD_EMBED)
message(DEPRECATION "ASMJIT_BUILD_EMBED is deprecated, use ASMJIT_EMBED")
@@ -33,9 +32,8 @@ if (DEFINED ASMJIT_BUILD_STATIC)
set(ASMJIT_STATIC "${ASMJIT_BUILD_STATIC}")
endif()
# =============================================================================
# [AsmJit - Configuration]
# =============================================================================
# AsmJit - Configuration
# ======================
if (NOT DEFINED ASMJIT_TEST)
set(ASMJIT_TEST FALSE)
@@ -84,9 +82,8 @@ set(ASMJIT_NO_FOREIGN "${ASMJIT_NO_FOREIGN}" CACHE BOOL "Disable all f
set(ASMJIT_NO_NATVIS "${ASMJIT_NO_NATVIS}" CACHE BOOL "Disable natvis support (embedding asmjit.natvis in PDB)")
set(ASMJIT_NO_CUSTOM_FLAGS "${ASMJIT_NO_CUSTOM_FLAGS}" CACHE BOOL "Disable extra compilation flags added by AsmJit to its targets")
# =============================================================================
# [AsmJit - Project]
# =============================================================================
# AsmJit - Project
# ================
set(ASMJIT_INCLUDE_DIRS "${ASMJIT_DIR}/src") # Include directory is the same as source dir.
set(ASMJIT_DEPS "") # AsmJit dependencies (libraries) for the linker.
@@ -98,9 +95,8 @@ set(ASMJIT_PRIVATE_CFLAGS_REL "") # Private compiler flags used b
set(ASMJIT_SANITIZE_CFLAGS "") # Compiler flags required by currently enabled sanitizers.
set(ASMJIT_SANITIZE_LFLAGS "") # Linker flags required by currently enabled sanitizers.
# =============================================================================
# [AsmJit - Utilities]
# =============================================================================
# AsmJit - Utilities
# ==================
function(asmjit_detect_cflags out)
set(out_array ${${out}})
@@ -172,9 +168,8 @@ function(asmjit_add_target target target_type)
endif()
endfunction()
# =============================================================================
# [AsmJit - Compiler Support]
# =============================================================================
# AsmJit - Compiler Support
# =========================
set(ASMJIT_INCLUDE_DIRS "${ASMJIT_DIR}/src") # Include directory is the same as source dir.
set(ASMJIT_DEPS "") # AsmJit dependencies (libraries) for the linker.
@@ -283,9 +278,8 @@ foreach(build_option ASMJIT_STATIC
endif()
endforeach()
# =============================================================================
# [AsmJit - Linker Support]
# =============================================================================
# AsmJit - Linker Support
# =======================
if (WIN32)
if(CMAKE_LINKER MATCHES "link\\.exe" OR CMAKE_LINKER MATCHES "lld-link\\.exe")
@@ -293,9 +287,8 @@ if (WIN32)
endif()
endif()
# =============================================================================
# [AsmJit - Source]
# =============================================================================
# AsmJit - Source
# ===============
set(ASMJIT_SRC_LIST
asmjit/asmjit.h
@@ -324,7 +317,6 @@ set(ASMJIT_SRC_LIST
asmjit/core/constpool.h
asmjit/core/cpuinfo.cpp
asmjit/core/cpuinfo.h
asmjit/core/datatypes.h
asmjit/core/emithelper.cpp
asmjit/core/emithelper_p.h
asmjit/core/emitter.cpp
@@ -335,7 +327,6 @@ set(ASMJIT_SRC_LIST
asmjit/core/environment.h
asmjit/core/errorhandler.cpp
asmjit/core/errorhandler.h
asmjit/core/features.h
asmjit/core/formatter.cpp
asmjit/core/formatter.h
asmjit/core/func.cpp
@@ -401,8 +392,6 @@ set(ASMJIT_SRC_LIST
asmjit/x86/x86emithelper.cpp
asmjit/x86/x86emithelper_p.h
asmjit/x86/x86emitter.h
asmjit/x86/x86features.cpp
asmjit/x86/x86features.h
asmjit/x86/x86formatter.cpp
asmjit/x86/x86formatter_p.h
asmjit/x86/x86func.cpp
@@ -439,9 +428,8 @@ if (NOT ${CMAKE_VERSION} VERSION_LESS "3.8.0")
source_group(TREE "${ASMJIT_DIR}" FILES ${ASMJIT_SRC})
endif()
# =============================================================================
# [AsmJit - Summary]
# =============================================================================
# AsmJit - Summary
# ================
message("** AsmJit Summary **")
message(" ASMJIT_DIR=${ASMJIT_DIR}")
@@ -454,9 +442,8 @@ message(" ASMJIT_PRIVATE_CFLAGS=${ASMJIT_PRIVATE_CFLAGS}")
message(" ASMJIT_PRIVATE_CFLAGS_DBG=${ASMJIT_PRIVATE_CFLAGS_DBG}")
message(" ASMJIT_PRIVATE_CFLAGS_REL=${ASMJIT_PRIVATE_CFLAGS_REL}")
# =============================================================================
# [AsmJit - Targets]
# =============================================================================
# AsmJit - Targets
# ================
if (NOT ASMJIT_EMBED)
# Add AsmJit target.


@@ -47,12 +47,10 @@ TODO
* [ ] Core:
* [ ] Add support for user external buffers in CodeBuffer / CodeHolder.
* [ ] Register allocator doesn't understand register pairs, affected instructions:
* [ ] v4fmaddps, v4fmaddss, v4fnmaddps, v4fnmaddss
* [ ] vp4dpwssd, vp4dpwssds
* [ ] vp2intersectd, vp2intersectq
* [ ] Ports:
* [ ] ARM/Thumb/AArch64 support.
* [ ] 32-bit ARM/Thumb port.
* [ ] 64-bit ARM (AArch64) port.
* [ ] RISC-V port.
Support
-------


@@ -34,50 +34,94 @@
</Expand>
</Type>
<Type Name="asmjit::OperandSignature">
<Intrinsic Name="opType" Expression="(asmjit::OperandType)(_bits &amp; 0x7)" />
<Intrinsic Name="opSize" Expression="(_bits &gt;&gt; 24) &amp; 0xFF" />
<Intrinsic Name="regType" Expression="(asmjit::RegType)((_bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="regGroup" Expression="(asmjit::RegGroup)((_bits &gt;&gt; 8) &amp; 0xF)" />
<Intrinsic Name="memBaseType" Expression="(asmjit::RegType)((_bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="memIndexType" Expression="(asmjit::RegType)((_bits &gt;&gt; 8) &amp; 0x1F)" />
<Intrinsic Name="memRegHome" Expression="(bool)((_bits &gt;&gt; 13) &amp; 0x1)" />
<Intrinsic Name="memX86Segment" Expression="(asmjit::x86::SReg::Id)((_bits &gt;&gt; 18) &amp; 0x7)" />
<Intrinsic Name="memX86AddrType" Expression="(asmjit::x86::Mem::AddrType)((_bits &gt;&gt; 14) &amp; 0x3)" />
<Intrinsic Name="memX86ShiftValue" Expression="((_bits &gt;&gt; 16) &amp; 0x3)" />
<Intrinsic Name="memX86Broadcast" Expression="(asmjit::x86::Mem::Broadcast)((_bits &gt;&gt; 21) &amp; 0x7)" />
<Intrinsic Name="immType" Expression="(asmjit::ImmType)((_bits &gt;&gt; 3) &amp; 0x1)" />
<DisplayString Condition="opType() == asmjit::OperandType::kNone">[None]</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kReg">[Reg] {{ type={regType()} group={regGroup()} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kMem">[Mem] {{ base={memBaseType()} index={memIndexType()} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kImm">[Imm] {{ type={immType()} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kLabel">[Label]</DisplayString>
<DisplayString Condition="opType() &gt; asmjit::OperandType::kMaxValue">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="bits">_bits, X</Item>
<Item Name="op.type">opType()</Item>
<Item Name="reg.type" Condition="opType() == asmjit::OperandType::kReg">regType()</Item>
<Item Name="reg.group" Condition="opType() == asmjit::OperandType::kReg">regGroup()</Item>
<Item Name="reg.size" Condition="opType() == asmjit::OperandType::kReg">opSize(), d</Item>
<Item Name="mem.baseType" Condition="opType() == asmjit::OperandType::kMem">memBaseType()</Item>
<Item Name="mem.indexType" Condition="opType() == asmjit::OperandType::kMem">memIndexType()</Item>
<Item Name="mem.regHome" Condition="opType() == asmjit::OperandType::kMem">memRegHome()</Item>
<Item Name="mem.size" Condition="opType() == asmjit::OperandType::kMem">opSize(), d</Item>
<Item Name="mem.x86.segment" Condition="opType() == asmjit::OperandType::kMem">memX86Segment()</Item>
<Item Name="mem.x86.addrType" Condition="opType() == asmjit::OperandType::kMem">memX86AddrType()</Item>
<Item Name="mem.x86.shift" Condition="opType() == asmjit::OperandType::kMem">memX86ShiftValue()</Item>
<Item Name="mem.x86.broadcast" Condition="opType() == asmjit::OperandType::kMem">memX86Broadcast()</Item>
<Item Name="imm.type" Condition="opType() == asmjit::OperandType::kImm">immType()</Item>
</Expand>
</Type>
<Type Name="asmjit::Operand_">
<Intrinsic Name="opType" Expression="(unsigned int)(_signature &amp; 0x7)" />
<Intrinsic Name="opSize" Expression="(_signature &gt;&gt; 24) &amp; 0xFF" />
<Intrinsic Name="regType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F" />
<Intrinsic Name="regGroup" Expression="(_signature &gt;&gt; 8) &amp; 0xF" />
<Intrinsic Name="memBaseType" Expression="(_signature &gt;&gt; 3) &amp; 0x1F" />
<Intrinsic Name="memIndexType" Expression="(_signature &gt;&gt; 8) &amp; 0x1F" />
<Intrinsic Name="memAddrType" Expression="(_signature &gt;&gt; 13) &amp; 0x3" />
<Intrinsic Name="memRegHome" Expression="(_signature &gt;&gt; 15) &amp; 0x1" />
<Intrinsic Name="opType" Expression="(asmjit::OperandType)(_signature._bits &amp; 0x7)" />
<Intrinsic Name="opSize" Expression="(_signature._bits &gt;&gt; 24) &amp; 0xFF" />
<Intrinsic Name="regType" Expression="(asmjit::RegType)((_signature._bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="regGroup" Expression="(asmjit::RegGroup)((_signature._bits &gt;&gt; 8) &amp; 0xF)" />
<Intrinsic Name="memBaseType" Expression="(asmjit::RegType)((_signature._bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="memIndexType" Expression="(asmjit::RegType)((_signature._bits &gt;&gt; 8) &amp; 0x1F)" />
<Intrinsic Name="memRegHome" Expression="(bool)((_signature._bits &gt;&gt; 13) &amp; 0x1)" />
<Intrinsic Name="memX86Segment" Expression="(asmjit::x86::SReg::Id)((_signature._bits &gt;&gt; 18) &amp; 0x7)" />
<Intrinsic Name="memX86AddrType" Expression="(asmjit::x86::Mem::AddrType)((_signature._bits &gt;&gt; 14) &amp; 0x3)" />
<Intrinsic Name="memX86ShiftValue" Expression="((_signature._bits &gt;&gt; 16) &amp; 0x3)" />
<Intrinsic Name="memX86Broadcast" Expression="(asmjit::x86::Mem::Broadcast)((_signature._bits &gt;&gt; 21) &amp; 0x7)" />
<Intrinsic Name="memBaseId" Expression="_baseId" />
<Intrinsic Name="memIndexId" Expression="_data[0]" />
<Intrinsic Name="memOffset32b" Expression="(__int64)int(_data[1])" />
<Intrinsic Name="memOffset64b" Expression="(__int64) ((unsigned __int64)_baseId &lt;&lt; 32) | ((unsigned __int64)_data[1])" />
<Intrinsic Name="memOffset" Expression="memBaseType() != 0 ? memOffset32b() : memOffset64b()" />
<Intrinsic Name="memOffset" Expression="memBaseType() != asmjit::RegType::kNone ? memOffset32b() : memOffset64b()" />
<Intrinsic Name="immType" Expression="(asmjit::ImmType)((_signature._bits &gt;&gt; 3) &amp; 0x1)" />
<Intrinsic Name="immValue" Expression="((__int64)_data[1] &lt;&lt; 32) | (__int64)_data[0]" />
<DisplayString Condition="opType() == 0">[None]</DisplayString>
<DisplayString Condition="opType() == 1">[Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="opType() == 2">[Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }}</DisplayString>
<DisplayString Condition="opType() == 3">[Imm] {{ val={immValue(), d} hex={immValue(), X} }}</DisplayString>
<DisplayString Condition="opType() == 4">[Label] {{ id={_baseId} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kNone">[None]</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kReg">[Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kMem">[Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kImm">[Imm] {{ val={immValue(), d} hex={immValue(), X} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kLabel">[Label] {{ id={_baseId} }}</DisplayString>
<DisplayString Condition="opType() &gt; 4">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="_signature">_signature, X</Item>
<Item Name="_signature.any.type">(asmjit::Operand_::OpType)opType()</Item>
<Item Name="_signature.any.size">opSize(), d</Item>
<Item Name="_signature.reg.type" Condition="opType() == 1">(asmjit::BaseReg::RegType)regType()</Item>
<Item Name="_signature.reg.group" Condition="opType() == 1">(asmjit::BaseReg::RegGroup)regGroup()</Item>
<Item Name="_signature.mem.baseType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memBaseType()</Item>
<Item Name="_signature.mem.indexType" Condition="opType() == 2">(asmjit::BaseReg::RegType)memIndexType()</Item>
<Item Name="_signature.mem.addrType" Condition="opType() == 2">(asmjit::BaseMem::AddrType)memAddrType()</Item>
<Item Name="_signature.mem.regHome" Condition="opType() == 2">(bool)memRegHome()</Item>
<Item Name="_baseId">_baseId</Item>
<Item Name="_data[0]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[0]</Item>
<Item Name="_data[1]" Condition="opType() != 2 &amp;&amp; opType() != 3">_data[1]</Item>
<Item Name="_data[IndexId]" Condition="opType() == 2">_data[0]</Item>
<Item Name="_data[OffsetLo]" Condition="opType() == 2">_data[1]</Item>
<Item Name="_data[ImmHi]" Condition="opType() == 3">_data[0]</Item>
<Item Name="_data[ImmLo]" Condition="opType() == 3">_data[1]</Item>
<Item Name="_signature">_signature._bits, X</Item>
<Item Name="op.type">opType()</Item>
<Item Name="op.size">opSize(), d</Item>
<Item Name="reg.type" Condition="opType() == asmjit::OperandType::kReg">regType()</Item>
<Item Name="reg.group" Condition="opType() == asmjit::OperandType::kReg">regGroup()</Item>
<Item Name="reg.id" Condition="opType() == asmjit::OperandType::kReg">_baseId, d</Item>
<Item Name="mem.baseType" Condition="opType() == asmjit::OperandType::kMem">memBaseType()</Item>
<Item Name="mem.baseId" Condition="opType() == asmjit::OperandType::kMem &amp;&amp; memBaseType() != asmjit::RegType::kNone">memBaseId()</Item>
<Item Name="mem.indexType" Condition="opType() == asmjit::OperandType::kMem">memIndexType()</Item>
<Item Name="mem.indexId" Condition="opType() == asmjit::OperandType::kMem &amp;&amp; memIndexType() != asmjit::RegType::kNone">memIndexId()</Item>
<Item Name="mem.regHome" Condition="opType() == asmjit::OperandType::kMem">memRegHome()</Item>
<Item Name="mem.offset" Condition="opType() == asmjit::OperandType::kMem">memOffset(), d</Item>
<Item Name="mem.x86.segment" Condition="opType() == asmjit::OperandType::kMem">memX86Segment()</Item>
<Item Name="mem.x86.addrType" Condition="opType() == asmjit::OperandType::kMem">memX86AddrType()</Item>
<Item Name="mem.x86.shift" Condition="opType() == asmjit::OperandType::kMem">memX86ShiftValue()</Item>
<Item Name="mem.x86.broadcast" Condition="opType() == asmjit::OperandType::kMem">memX86Broadcast()</Item>
<Item Name="imm.type" Condition="opType() == asmjit::OperandType::kImm">immType()</Item>
<Item Name="imm.value" Condition="opType() == asmjit::OperandType::kImm">immValue(), X</Item>
<Item Name="label.id" Condition="opType() == asmjit::OperandType::kLabel">_baseId, d</Item>
<Item Name="raw.baseId">_baseId</Item>
<Item Name="raw.data[0]">_data[0]</Item>
<Item Name="raw.data[1]">_data[1]</Item>
</Expand>
</Type>
@@ -98,7 +142,7 @@
<Expand HideRawView="true">
<Item Name="data">_data</Item>
<Item Name="typeId">(asmjit::Type::Id)(typeId())</Item>
<Item Name="typeId">(asmjit::TypeId)(typeId())</Item>
<Item Name="regType" Condition="isReg()">(asmjit::BaseReg::RegType)regType()</Item>
<Item Name="regId" Condition="isReg()">regId()</Item>
<Item Name="stackOffset" Condition="isStack()">stackOffset()</Item>
@@ -108,26 +152,26 @@
<Type Name="asmjit::BaseNode">
<Intrinsic Name="nodeType" Expression="_any._nodeType" />
<Intrinsic Name="isInst" Expression="nodeType() == asmjit::BaseNode::kNodeInst"></Intrinsic>
<Intrinsic Name="isSection" Expression="nodeType() == asmjit::BaseNode::kNodeSection"></Intrinsic>
<Intrinsic Name="isLabel" Expression="nodeType() == asmjit::BaseNode::kNodeLabel"></Intrinsic>
<Intrinsic Name="isAlign" Expression="nodeType() == asmjit::BaseNode::kNodeAlign"></Intrinsic>
<Intrinsic Name="isEmbedData" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedData"></Intrinsic>
<Intrinsic Name="isEmbedLabel" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedLabel"></Intrinsic>
<Intrinsic Name="isEmbedLabelDelta" Expression="nodeType() == asmjit::BaseNode::kNodeEmbedLabelDelta"></Intrinsic>
<Intrinsic Name="isConstPool" Expression="nodeType() == asmjit::BaseNode::kNodeConstPool"></Intrinsic>
<Intrinsic Name="isComment" Expression="nodeType() == asmjit::BaseNode::kNodeComment"></Intrinsic>
<Intrinsic Name="isSentinel" Expression="nodeType() == asmjit::BaseNode::kNodeSentinel"></Intrinsic>
<Intrinsic Name="isJump" Expression="nodeType() == asmjit::BaseNode::kNodeJump"></Intrinsic>
<Intrinsic Name="isFunc" Expression="nodeType() == asmjit::BaseNode::kNodeFunc"></Intrinsic>
<Intrinsic Name="isFuncRet" Expression="nodeType() == asmjit::BaseNode::kNodeFuncRet"></Intrinsic>
<Intrinsic Name="isInvoke" Expression="nodeType() == asmjit::BaseNode::kNodeInvoke"></Intrinsic>
<Intrinsic Name="isInst" Expression="nodeType() == asmjit::NodeType::kInst"></Intrinsic>
<Intrinsic Name="isSection" Expression="nodeType() == asmjit::NodeType::kSection"></Intrinsic>
<Intrinsic Name="isLabel" Expression="nodeType() == asmjit::NodeType::kLabel"></Intrinsic>
<Intrinsic Name="isAlign" Expression="nodeType() == asmjit::NodeType::kAlign"></Intrinsic>
<Intrinsic Name="isEmbedData" Expression="nodeType() == asmjit::NodeType::kEmbedData"></Intrinsic>
<Intrinsic Name="isEmbedLabel" Expression="nodeType() == asmjit::NodeType::kEmbedLabel"></Intrinsic>
<Intrinsic Name="isEmbedLabelDelta" Expression="nodeType() == asmjit::NodeType::kEmbedLabelDelta"></Intrinsic>
<Intrinsic Name="isConstPool" Expression="nodeType() == asmjit::NodeType::kConstPool"></Intrinsic>
<Intrinsic Name="isComment" Expression="nodeType() == asmjit::NodeType::kComment"></Intrinsic>
<Intrinsic Name="isSentinel" Expression="nodeType() == asmjit::NodeType::kSentinel"></Intrinsic>
<Intrinsic Name="isJump" Expression="nodeType() == asmjit::NodeType::kJump"></Intrinsic>
<Intrinsic Name="isFunc" Expression="nodeType() == asmjit::NodeType::kFunc"></Intrinsic>
<Intrinsic Name="isFuncRet" Expression="nodeType() == asmjit::NodeType::kFuncRet"></Intrinsic>
<Intrinsic Name="isInvoke" Expression="nodeType() == asmjit::NodeType::kInvoke"></Intrinsic>
<Intrinsic Name="actsAsInst" Expression="isInst() || isJump() || isFunc() || isFuncRet() || isInvoke()" />
<Intrinsic Name="actsAsLabel" Expression="isLabel() || isFunc()" />
<DisplayString Condition="isInst()">[InstNode]</DisplayString>
<DisplayString Condition="isSentinel()">[SectionNode]</DisplayString>
<DisplayString Condition="isSection()">[SectionNode]</DisplayString>
<DisplayString Condition="isLabel()">[LabelNode]</DisplayString>
<DisplayString Condition="isAlign()">[AlignNode]</DisplayString>
<DisplayString Condition="isEmbedData()">[EmbedDataNode]</DisplayString>
@@ -140,14 +184,14 @@
<DisplayString Condition="isFunc()">[FuncNode]</DisplayString>
<DisplayString Condition="isFuncRet()">[FuncRetNode]</DisplayString>
<DisplayString Condition="isInvoke()">[InvokeNode]</DisplayString>
<DisplayString Condition="nodeType() == 0 || nodeType() &gt; 18">[UnknownNode {nodeType(), d}]</DisplayString>
<DisplayString Condition="nodeType() == asmjit::NodeType::kNone || nodeType() &gt; 18">[UnknownNode {nodeType(), d}]</DisplayString>
<Expand HideRawView="true">
<Item Name="prev">_prev</Item>
<Item Name="next">_next</Item>
<Item Name="nodeType">(asmjit::BaseNode::NodeType)_any._nodeType</Item>
<Item Name="nodeFlags">(asmjit::BaseNode::Flags)_any._nodeFlags</Item>
<Item Name="nodeType">_any._nodeType</Item>
<Item Name="nodeFlags">_any._nodeFlags</Item>
<Item Name="position">_position</Item>
<Item Name="userData.u64">_userDataU64</Item>
@@ -163,9 +207,9 @@
<Item Name="sectionId" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_id</Item>
<Item Name="nextSection" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_nextSection</Item>
<Item Name="labelId" Condition="isLabel()">((asmjit::LabelNode*)this)-&gt;_id</Item>
<Item Name="labelId" Condition="isLabel()">((asmjit::LabelNode*)this)-&gt;_labelId</Item>
<Item Name="alignMode" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignMode</Item>
<Item Name="alignMode" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignData._alignMode</Item>
<Item Name="alignment" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignment</Item>
<Item Name="typeId" Condition="isEmbedData()">_embed._typeId, d</Item>
@@ -175,15 +219,15 @@
<Item Name="inlineData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_inlineData</Item>
<Item Name="externalData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_externalData</Item>
<Item Name="labelId" Condition="isEmbedLabel()">((asmjit::EmbedLabelNode*)this)-&gt;_id</Item>
<Item Name="labelId" Condition="isEmbedLabel()">((asmjit::EmbedLabelNode*)this)-&gt;_labelId</Item>
<Item Name="labelId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_id</Item>
<Item Name="baseId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_baseId</Item>
<Item Name="labelId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_labelId</Item>
<Item Name="baseLabelId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_baseLabelId</Item>
<Item Name="dataSize" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_dataSize</Item>
<Item Name="constPool" Condition="isConstPool()">((asmjit::ConstPoolNode*)this)-&gt;_constPool</Item>
<Item Name="sentinel.sentinelType" Condition="isSentinel()">(asmjit::SentinelNode::SentinelType)_sentinel._sentinelType</Item>
<Item Name="sentinel.sentinelType" Condition="isSentinel()">_sentinel._sentinelType</Item>
<Item Name="annotation" Condition="isJump()">((asmjit::JumpNode*)this)-&gt;_annotation</Item>
@@ -194,7 +238,7 @@
<Item Name="args" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_args, [((asmjit::FuncNode*)this)-&gt;_funcDetail._argCount]</Item>
<Item Name="funcDetail" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_funcDetail</Item>
<Item Name="rets" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_rets, [((asmjit::InvokeNode*)this)-&gt;_funcDetail._retCount]</Item>
<Item Name="rets" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_rets</Item>
<Item Name="args" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_args, [((asmjit::InvokeNode*)this)-&gt;_funcDetail._argCount]</Item>
</Expand>
</Type>
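
The rewritten visualizer above decodes Operand_::_signature purely by masking fixed bit fields: operand type in bits 0-2, register (or memory base) type in bits 3-7, register group in bits 8-11, and operand size in bits 24-31. The following standalone sketch repeats that decoding using only the shifts and masks that appear in the natvis expressions; DecodedSignature and decodeSignature are illustrative names, not part of the AsmJit API.

#include <cstdint>
#include <cstdio>

// Field layout taken from the natvis intrinsics above: opType(), regType(),
// regGroup() and opSize(). Operand types 0..4 map to None/Reg/Mem/Imm/Label.
struct DecodedSignature {
  unsigned opType;
  unsigned regType;
  unsigned regGroup;
  unsigned opSize;
};

static DecodedSignature decodeSignature(std::uint32_t bits) {
  DecodedSignature out;
  out.opType   = bits & 0x7u;          // operand type
  out.regType  = (bits >> 3) & 0x1Fu;  // register type (or memory base type)
  out.regGroup = (bits >> 8) & 0xFu;   // register group
  out.opSize   = (bits >> 24) & 0xFFu; // operand size in bytes
  return out;
}

int main() {
  // A made-up signature word: operand type 1 (Reg), register type 2,
  // register group 0, size 8 bytes.
  std::uint32_t bits = (8u << 24) | (2u << 3) | 1u;
  DecodedSignature s = decodeSignature(bits);
  std::printf("type=%u regType=%u group=%u size=%u\n",
              s.opType, s.regType, s.regGroup, s.opSize);
  return 0;
}
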


@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifdef _WIN32
#pragma push_macro("min")


@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifdef _WIN32
#pragma pop_macro("min")


@@ -1,9 +1,9 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
// SPDX-License-Identifier: Zlib
// Official GitHub Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
// Copyright (c) 2008-2021 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages

File diff suppressed because it is too large


@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_API_BUILD_P_H_INCLUDED
#define ASMJIT_CORE_API_BUILD_P_H_INCLUDED
@@ -47,10 +29,6 @@
#include <windows.h>
#endif
// ============================================================================
// [asmjit::Build - Globals - Build-Only]
// ============================================================================
#include "./api-config.h"
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)


@@ -1,48 +1,59 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_API_CONFIG_H_INCLUDED
#define ASMJIT_CORE_API_CONFIG_H_INCLUDED
// ============================================================================
// [asmjit::Version]
// ============================================================================
// AsmJit Library & ABI Version
// ============================
//! \addtogroup asmjit_core
//! \{
//! AsmJit library version in `(Major << 16) | (Minor << 8) | (Patch)` format.
#define ASMJIT_LIBRARY_VERSION 0x010400 /* 1.4.0 */
#define ASMJIT_LIBRARY_VERSION 0x010800 /* 1.8.0 */
//! \def ASMJIT_ABI_NAMESPACE
//!
//! AsmJit ABI namespace is an inline namespace within \ref asmjit namespace.
//!
//! It's used to make sure that when a user links against an incompatible version of AsmJit, the link fails. It has
//! additional properties as well: when `ASMJIT_ABI_NAMESPACE` is defined by the user, it overrides the AsmJit
//! default, which makes it possible to use multiple AsmJit libraries within a single project, fully controlled by
//! the user. This is especially useful when one of those libraries comes from a third party.
#ifndef ASMJIT_ABI_NAMESPACE
#define ASMJIT_ABI_NAMESPACE _abi_1_8
#endif
//! \}
// ============================================================================
// [asmjit::Build - Documentation]
// ============================================================================
// Global Dependencies
// ===================
// NOTE: Doxygen cannot document macros that are not defined, that's why we have
// to define them and then undefine them, so it won't use the macros with its
// own preprocessor.
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h> // We really want std types as globals, not under 'std' namespace.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iterator>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
#include <pthread.h>
#endif
// Build Options
// =============
// NOTE: Doxygen cannot document macros that are not defined, that's why we have to define them and then undefine
// them immediately, so it won't use the macros with its own preprocessor.
#ifdef _DOXYGEN
namespace asmjit {
@@ -80,10 +91,10 @@ namespace asmjit {
//! Disables \ref asmjit_compiler functionality completely.
#define ASMJIT_NO_COMPILER
//! Disables JIT memory management and \ref JitRuntime.
//! Disables JIT memory management and \ref asmjit::JitRuntime.
#define ASMJIT_NO_JIT
//! Disables \ref Logger and \ref Formatter.
//! Disables \ref asmjit::Logger and \ref asmjit::Formatter.
#define ASMJIT_NO_LOGGING
//! Disables everything that contains text.
@@ -116,33 +127,6 @@ namespace asmjit {
} // {asmjit}
#endif // _DOXYGEN
// ============================================================================
// [asmjit::Dependencies]
// ============================================================================
// We really want std-types as globals.
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iterator>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
#if !defined(_WIN32) && !defined(__EMSCRIPTEN__)
#include <pthread.h>
#endif
// ============================================================================
// [asmjit::Options]
// ============================================================================
// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
#define ASMJIT_NO_COMPILER
@@ -150,37 +134,17 @@ namespace asmjit {
// Prevent compile-time errors caused by misconfiguration.
#if defined(ASMJIT_NO_TEXT) && !defined(ASMJIT_NO_LOGGING)
#pragma "ASMJIT_NO_TEXT can only be defined when ASMJIT_NO_LOGGING is defined."
#pragma message("'ASMJIT_NO_TEXT' can only be defined when 'ASMJIT_NO_LOGGING' is defined.")
#undef ASMJIT_NO_TEXT
#endif
#if defined(ASMJIT_NO_INTROSPECTION) && !defined(ASMJIT_NO_COMPILER)
#pragma message("ASMJIT_NO_INTROSPECTION can only be defined when ASMJIT_NO_COMPILER is defined")
#pragma message("'ASMJIT_NO_INTROSPECTION' can only be defined when 'ASMJIT_NO_COMPILER' is defined")
#undef ASMJIT_NO_INTROSPECTION
#endif
// ============================================================================
// [asmjit::Build - Globals - Deprecated]
// ============================================================================
#ifndef ASMJIT_NO_DEPRECATED
#if defined(ASMJIT_BUILD_EMBED) || defined(ASMJIT_BUILD_STATIC)
#if defined(ASMJIT_BUILD_EMBED)
#pragma message("'ASMJIT_BUILD_EMBED' is deprecated, use 'ASMJIT_STATIC'")
#endif
#if defined(ASMJIT_BUILD_STATIC)
#pragma message("'ASMJIT_BUILD_STATIC' is deprecated, use 'ASMJIT_STATIC'")
#endif
#if !defined(ASMJIT_STATIC)
#define ASMJIT_STATIC
#endif
#endif
#endif // !ASMJIT_NO_DEPRECATED
// ============================================================================
// [asmjit::Build - Globals - Build Mode]
// ============================================================================
// Build Mode
// ==========
// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
@@ -191,9 +155,8 @@ namespace asmjit {
#endif
#endif
// ============================================================================
// [asmjit::Build - Globals - Target Architecture Information]
// ============================================================================
// Target Architecture Detection
// =============================
#if defined(_M_X64) || defined(__x86_64__)
#define ASMJIT_ARCH_X86 64
@@ -239,19 +202,15 @@ namespace asmjit {
#define ASMJIT_ARCH_BE 0
#endif
// ============================================================================
// [asmjit::Build - Globals - Backends]
// ============================================================================
#if defined(ASMJIT_NO_FOREIGN)
#if !ASMJIT_ARCH_X86 && !defined(ASMJIT_NO_X86)
#define ASMJIT_NO_X86
#endif
#endif
// ============================================================================
// [asmjit::Build - Globals - C++ Compiler and Features Detection]
// ============================================================================
// C++ Compiler and Features Detection
// ===================================
#define ASMJIT_CXX_GNU 0
#define ASMJIT_CXX_MAKE_VER(MAJOR, MINOR) ((MAJOR) * 1000 + (MINOR))
@@ -293,9 +252,12 @@ namespace asmjit {
#define ASMJIT_CXX_HAS_ATTRIBUTE(NAME, CHECK) (!(!(CHECK)))
#endif
// ============================================================================
// [asmjit::Build - Globals - API Decorators & Language Extensions]
// ============================================================================
// API Decorators & C++ Extensions
// ===============================
//! \def ASMJIT_API
//!
//! A decorator that is used to decorate API that AsmJit exports when built as a shared library.
// API (Export / Import).
#if !defined(ASMJIT_STATIC)
@@ -324,12 +286,12 @@ namespace asmjit {
#define ASMJIT_VARAPI extern ASMJIT_API
#endif
// This is basically a workaround. When using MSVC and marking class as DLL
// export everything gets exported, which is unwanted in most projects. MSVC
// automatically exports typeinfo and vtable if at least one symbol of the
// class is exported. However, GCC has some strange behavior that even if
// one or more symbol is exported it doesn't export typeinfo unless the
// class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
//! \def ASMJIT_VIRTAPI
//!
//! This is basically a workaround. When using MSVC and marking class as DLL export everything gets exported, which
//! is unwanted in most projects. MSVC automatically exports typeinfo and vtable if at least one symbol of the class
//! is exported. However, GCC has some strange behavior that even if one or more symbol is exported it doesn't export
//! typeinfo unless the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
#if !defined(_WIN32) && defined(__GNUC__)
#define ASMJIT_VIRTAPI ASMJIT_API
#else
@@ -338,11 +300,11 @@ namespace asmjit {
// Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#define ASMJIT_INLINE inline __attribute__((__always_inline__))
#define ASMJIT_FORCE_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#define ASMJIT_INLINE __forceinline
#define ASMJIT_FORCE_INLINE __forceinline
#else
#define ASMJIT_INLINE inline
#define ASMJIT_FORCE_INLINE inline
#endif
#if defined(__GNUC__)
@@ -401,6 +363,17 @@ namespace asmjit {
#define ASMJIT_MAY_ALIAS
#endif
//! \def ASMJIT_MAYBE_UNUSED
//!
//! Expands to `[[maybe_unused]]` if supported or a compiler attribute instead.
#if __cplusplus >= 201703L
#define ASMJIT_MAYBE_UNUSED [[maybe_unused]]
#elif defined(__GNUC__)
#define ASMJIT_MAYBE_UNUSED __attribute__((unused))
#else
#define ASMJIT_MAYBE_UNUSED
#endif
//! \def ASMJIT_LIKELY(...)
//!
//! Condition is likely to be taken (mostly error handling and edge cases).
@@ -457,49 +430,51 @@ namespace asmjit {
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
#endif
// ============================================================================
// [asmjit::Build - Globals - Begin-Namespace / End-Namespace]
// ============================================================================
// Begin-Namespace & End-Namespace Macros
// ======================================
#if defined(__clang__)
#if defined _DOXYGEN
#define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
#define ASMJIT_END_NAMESPACE }
#elif defined(__clang__)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
_Pragma("clang diagnostic push") \
_Pragma("clang diagnostic ignored \"-Wconstant-logical-operand\"") \
_Pragma("clang diagnostic ignored \"-Wunnamed-type-template-args\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("clang diagnostic pop") \
}
}}
#elif defined(__GNUC__) && __GNUC__ == 4
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wmissing-field-initializers\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}
}}
#elif defined(__GNUC__) && __GNUC__ >= 8
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wclass-memaccess\"")
#define ASMJIT_END_NAMESPACE \
_Pragma("GCC diagnostic pop") \
}
}}
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
#define ASMJIT_BEGIN_NAMESPACE \
namespace asmjit { \
namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE { \
__pragma(warning(push)) \
__pragma(warning(disable: 4127)) /* conditional expression is const */ \
__pragma(warning(disable: 4201)) /* nameless struct/union */
#define ASMJIT_END_NAMESPACE \
__pragma(warning(pop)) \
}
}}
#endif
#if !defined(ASMJIT_BEGIN_NAMESPACE) && !defined(ASMJIT_END_NAMESPACE)
#define ASMJIT_BEGIN_NAMESPACE namespace asmjit {
#define ASMJIT_END_NAMESPACE }
#define ASMJIT_BEGIN_NAMESPACE namespace asmjit { inline namespace ASMJIT_ABI_NAMESPACE {
#define ASMJIT_END_NAMESPACE }}
#endif
#define ASMJIT_BEGIN_SUB_NAMESPACE(NAMESPACE) \
@@ -510,9 +485,8 @@ namespace asmjit {
} \
ASMJIT_END_NAMESPACE
// ============================================================================
// [asmjit::Build - Globals - Utilities]
// ============================================================================
// C++ Utilities
// =============
#define ASMJIT_NONCOPYABLE(Type) \
Type(const Type& other) = delete; \
@@ -523,11 +497,71 @@ namespace asmjit {
Type(const Type& other) = delete; \
Type& operator=(const Type& other) = delete;
// ============================================================================
// [asmjit::Build - Globals - Cleanup]
// ============================================================================
//! \def ASMJIT_DEFINE_ENUM_FLAGS(T)
//!
//! Defines bit operations for enumeration flags.
#ifdef _DOXYGEN
#define ASMJIT_DEFINE_ENUM_FLAGS(T)
#else
#define ASMJIT_DEFINE_ENUM_FLAGS(T) \
static ASMJIT_FORCE_INLINE constexpr T operator~(T a) noexcept { \
return T(~(std::underlying_type<T>::type)(a)); \
} \
\
static ASMJIT_FORCE_INLINE constexpr T operator|(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) | \
(std::underlying_type<T>::type)(b)); \
} \
static ASMJIT_FORCE_INLINE constexpr T operator&(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) & \
(std::underlying_type<T>::type)(b)); \
} \
static ASMJIT_FORCE_INLINE constexpr T operator^(T a, T b) noexcept { \
return T((std::underlying_type<T>::type)(a) ^ \
(std::underlying_type<T>::type)(b)); \
} \
\
static ASMJIT_FORCE_INLINE T& operator|=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) | \
(std::underlying_type<T>::type)(b)); \
return a; \
} \
static ASMJIT_FORCE_INLINE T& operator&=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) & \
(std::underlying_type<T>::type)(b)); \
return a; \
} \
static ASMJIT_FORCE_INLINE T& operator^=(T& a, T b) noexcept { \
a = T((std::underlying_type<T>::type)(a) ^ \
(std::underlying_type<T>::type)(b)); \
return a; \
}
#endif
//! \def ASMJIT_DEFINE_ENUM_COMPARE(T)
//!
//! Defines comparison operations for enumeration flags.
#ifdef _DOXYGEN
#define ASMJIT_DEFINE_ENUM_COMPARE(T)
#else
#define ASMJIT_DEFINE_ENUM_COMPARE(T) \
static ASMJIT_FORCE_INLINE bool operator<(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) < (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_FORCE_INLINE bool operator<=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) <= (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_FORCE_INLINE bool operator>(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) > (std::underlying_type<T>::type)(b); \
} \
static ASMJIT_FORCE_INLINE bool operator>=(T a, T b) noexcept { \
return (std::underlying_type<T>::type)(a) >= (std::underlying_type<T>::type)(b); \
}
#endif
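A small sketch of what these macros enable for a strong-typed enum; MyFlags is not part of AsmJit and only illustrates the generated operators.
enum class MyFlags : uint32_t {
  kNone = 0x00u,
  kA    = 0x01u,
  kB    = 0x02u
};
ASMJIT_DEFINE_ENUM_FLAGS(MyFlags)

void flagsExample() {
  MyFlags f = MyFlags::kA | MyFlags::kB;              // operator| from the macro
  f &= ~MyFlags::kB;                                  // operator~ and operator&= as well
  bool hasA = (f & MyFlags::kA) != MyFlags::kNone;    // typical test pattern
  (void)hasA;
}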
// Cleanup Api-Config Specific Macros
// ==================================
// Cleanup definitions that are only used within this header file.
#undef ASMJIT_CXX_GNU
#undef ASMJIT_CXX_MAKE_VER

View File

@@ -1,106 +1,82 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
#define ASMJIT_CORE_ARCHCOMMONS_H_INCLUDED
// This file provides architecture-specific classes that are required in the
// core library. For example Imm operand allows to be created from arm::Shift
// in a const-expr way, so the arm::Shift must be provided. So this header
// file provides everything architecture-specific that is used by the Core API.
// This file provides architecture-specific classes that are required by the core library. For example, the Imm
// operand can be created from arm::Shift in a constexpr way, so arm::Shift must be available here. This header
// therefore provides everything architecture-specific that is used by the Core API.
#include "../core/globals.h"
// ============================================================================
// [asmjit::arm]
// ============================================================================
ASMJIT_BEGIN_SUB_NAMESPACE(arm)
//! \addtogroup asmjit_arm
//! \{
//! Shift operation predicate (ARM) describes either a SHIFT or EXTEND operation.
//!
//! \note The constants are AsmJit specific. The first 5 values describe real constants on ARM32 and AArch64
//! hardware; the additional constants that describe extend modes are specific to AsmJit and are translated to
//! the AArch64 specific constants by the assembler.
enum class ShiftOp {
//! Shift left logical operation (default).
//!
//! Available to all ARM architectures.
kLSL = 0x00u,
//! Shift right logical operation.
//!
//! Available to all ARM architectures.
kLSR = 0x01u,
//! Shift right arithmetic operation.
//!
//! Available to all ARM architectures.
kASR = 0x02u,
//! Rotate right operation.
//!
//! \note Not available in AArch64 mode.
kROR = 0x03u,
//! Rotate right with carry operation (encoded as `kROR` with a zero value).
//!
//! \note Not available in AArch64 mode.
kRRX = 0x04u,
//! Shift left by filling low order bits with ones.
kMSL = 0x05u,
//! UXTB extend register operation (AArch64 only).
kUXTB = 0x06u,
//! UXTH extend register operation (AArch64 only).
kUXTH = 0x07u,
//! UXTW extend register operation (AArch64 only).
kUXTW = 0x08u,
//! UXTX extend register operation (AArch64 only).
kUXTX = 0x09u,
//! SXTB extend register operation (AArch64 only).
kSXTB = 0x0Au,
//! SXTH extend register operation (AArch64 only).
kSXTH = 0x0Bu,
//! SXTW extend register operation (AArch64 only).
kSXTW = 0x0Cu,
//! SXTX extend register operation (AArch64 only).
kSXTX = 0x0Du
// NOTE: 0xE and 0xF are used by memory operand to specify POST|PRE offset mode.
};
//! Represents ARM immediate shift operation type and value.
class Shift {
public:
//! Operation predicate (ARM) describes either SHIFT or EXTEND operation.
//!
//! \note The constants are AsmJit specific. The first 5 values describe real
//! constants on ARM32 and AArch64 hardware, however, the additional constants
//! that describe extend modes are specific to AsmJit and would be translated
//! to the AArch64 specific constants by the assembler.
enum Op : uint32_t {
//! Shift left logical operation (default).
//!
//! Available to all ARM architectures.
kOpLSL = 0x00u,
//! Shift right logical operation.
//!
//! Available to all ARM architectures.
kOpLSR = 0x01u,
//! Shift right arithmetic operation.
//!
//! Available to all ARM architectures.
kOpASR = 0x02u,
//! Rotate right operation.
//!
//! \note Not available in AArch64 mode.
kOpROR = 0x03u,
//! Rotate right with carry operation (encoded as `kShiftROR` with zero).
//!
//! \note Not available in AArch64 mode.
kOpRRX = 0x04u,
//! Shift left by filling low order bits with ones.
kOpMSL = 0x05u,
//! UXTB extend register operation (AArch64 only).
kOpUXTB = 0x06u,
//! UXTH extend register operation (AArch64 only).
kOpUXTH = 0x07u,
//! UXTW extend register operation (AArch64 only).
kOpUXTW = 0x08u,
//! UXTX extend register operation (AArch64 only).
kOpUXTX = 0x09u,
//! SXTB extend register operation (AArch64 only).
kOpSXTB = 0x0Au,
//! SXTH extend register operation (AArch64 only).
kOpSXTH = 0x0Bu,
//! SXTW extend register operation (AArch64 only).
kOpSXTW = 0x0Cu,
//! SXTX extend register operation (AArch64 only).
kOpSXTX = 0x0Du
// NOTE: 0xE and 0xF are used by memory operand to specify POST|PRE offset mode.
};
//! Shift operation.
uint32_t _op;
ShiftOp _op;
//! Shift Value.
uint32_t _value;
@@ -111,51 +87,51 @@ public:
constexpr Shift(const Shift& other) noexcept = default;
//! Constructs Shift from operation `op` and shift `value`.
constexpr Shift(uint32_t op, uint32_t value) noexcept
constexpr Shift(ShiftOp op, uint32_t value) noexcept
: _op(op),
_value(value) {}
//! Returns the shift operation.
constexpr uint32_t op() const noexcept { return _op; }
constexpr ShiftOp op() const noexcept { return _op; }
//! Sets shift operation to `op`.
inline void setOp(ShiftOp op) noexcept { _op = op; }
//! Returns the shift amount.
constexpr uint32_t value() const noexcept { return _value; }
//! Sets shift operation to `op`.
inline void setOp(uint32_t op) noexcept { _op = op; }
//! Sets shift amount to `value`.
inline void setValue(uint32_t value) noexcept { _value = value; }
};
//! Constructs a `LSL #value` shift (logical shift left).
static constexpr Shift lsl(uint32_t value) noexcept { return Shift(Shift::kOpLSL, value); }
static constexpr Shift lsl(uint32_t value) noexcept { return Shift(ShiftOp::kLSL, value); }
//! Constructs a `LSR #value` shift (logical shift right).
static constexpr Shift lsr(uint32_t value) noexcept { return Shift(Shift::kOpLSR, value); }
static constexpr Shift lsr(uint32_t value) noexcept { return Shift(ShiftOp::kLSR, value); }
//! Constructs a `ASR #value` shift (arithmetic shift right).
static constexpr Shift asr(uint32_t value) noexcept { return Shift(Shift::kOpASR, value); }
static constexpr Shift asr(uint32_t value) noexcept { return Shift(ShiftOp::kASR, value); }
//! Constructs a `ROR #value` shift (rotate right).
static constexpr Shift ror(uint32_t value) noexcept { return Shift(Shift::kOpROR, value); }
static constexpr Shift ror(uint32_t value) noexcept { return Shift(ShiftOp::kROR, value); }
//! Constructs a `RRX` shift (rotate with carry by 1).
static constexpr Shift rrx() noexcept { return Shift(Shift::kOpRRX, 0); }
static constexpr Shift rrx() noexcept { return Shift(ShiftOp::kRRX, 0); }
//! Constructs a `MSL #value` shift (logical shift left filling ones).
static constexpr Shift msl(uint32_t value) noexcept { return Shift(Shift::kOpMSL, value); }
static constexpr Shift msl(uint32_t value) noexcept { return Shift(ShiftOp::kMSL, value); }
//! Constructs a `UXTB #value` extend and shift (unsigned byte extend).
static constexpr Shift uxtb(uint32_t value) noexcept { return Shift(Shift::kOpUXTB, value); }
static constexpr Shift uxtb(uint32_t value) noexcept { return Shift(ShiftOp::kUXTB, value); }
//! Constructs a `UXTH #value` extend and shift (unsigned hword extend).
static constexpr Shift uxth(uint32_t value) noexcept { return Shift(Shift::kOpUXTH, value); }
static constexpr Shift uxth(uint32_t value) noexcept { return Shift(ShiftOp::kUXTH, value); }
//! Constructs a `UXTW #value` extend and shift (unsigned word extend).
static constexpr Shift uxtw(uint32_t value) noexcept { return Shift(Shift::kOpUXTW, value); }
static constexpr Shift uxtw(uint32_t value) noexcept { return Shift(ShiftOp::kUXTW, value); }
//! Constructs a `UXTX #value` extend and shift (unsigned dword extend).
static constexpr Shift uxtx(uint32_t value) noexcept { return Shift(Shift::kOpUXTX, value); }
static constexpr Shift uxtx(uint32_t value) noexcept { return Shift(ShiftOp::kUXTX, value); }
//! Constructs a `SXTB #value` extend and shift (signed byte extend).
static constexpr Shift sxtb(uint32_t value) noexcept { return Shift(Shift::kOpSXTB, value); }
static constexpr Shift sxtb(uint32_t value) noexcept { return Shift(ShiftOp::kSXTB, value); }
//! Constructs a `SXTH #value` extend and shift (signed hword extend).
static constexpr Shift sxth(uint32_t value) noexcept { return Shift(Shift::kOpSXTH, value); }
static constexpr Shift sxth(uint32_t value) noexcept { return Shift(ShiftOp::kSXTH, value); }
//! Constructs a `SXTW #value` extend and shift (signed word extend).
static constexpr Shift sxtw(uint32_t value) noexcept { return Shift(Shift::kOpSXTW, value); }
static constexpr Shift sxtw(uint32_t value) noexcept { return Shift(ShiftOp::kSXTW, value); }
//! Constructs a `SXTX #value` extend and shift (signed dword extend).
static constexpr Shift sxtx(uint32_t value) noexcept { return Shift(Shift::kOpSXTX, value); }
static constexpr Shift sxtx(uint32_t value) noexcept { return Shift(ShiftOp::kSXTX, value); }
//! \}
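A brief usage sketch of the strong-typed shift API above; the qualification assumes user code outside the asmjit namespace.
using namespace asmjit;

arm::Shift byTwo  = arm::lsl(2);                          // LSL #2
arm::Shift extend = arm::Shift(arm::ShiftOp::kSXTW, 0);   // SXTW extend, no extra shift

arm::ShiftOp op = extend.op();        // ShiftOp::kSXTW
uint32_t amount = byTwo.value();      // 2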

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
@@ -35,10 +17,6 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ArchTraits]
// ============================================================================
static const constexpr ArchTraits noArchTraits = {
// SP/FP/LR/PC.
0xFF, 0xFF, 0xFF, 0xFF,
@@ -53,27 +31,38 @@ static const constexpr ArchTraits noArchTraits = {
0, 0,
// ISA features [Gp, Vec, Other0, Other1].
{ 0, 0, 0, 0},
{{
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints
}},
// RegTypeToSignature.
{ { 0 } },
#define V(index) { OperandSignature(0) }
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// RegTypeToTypeId.
{ 0 },
#define V(index) TypeId::kVoid
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// TypeIdToRegType.
{ 0 },
#define V(index) RegType::kNone
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ISAWordNameId::kByte,
ISAWordNameId::kHalf,
ISAWordNameId::kWord,
ISAWordNameId::kQuad
ArchTypeNameId::kByte,
ArchTypeNameId::kHalf,
ArchTypeNameId::kWord,
ArchTypeNameId::kQuad
}
};
ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount] = {
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1] = {
// No architecture.
noArchTraits,
@@ -111,63 +100,60 @@ ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount] = {
noArchTraits
};
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfoOut) noexcept {
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// TODO: Remove this, should never be used like this.
// Passed RegType instead of TypeId?
if (typeId <= BaseReg::kTypeMax)
typeId = archTraits.regTypeToTypeId(typeId);
if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue))
typeId = archTraits.regTypeToTypeId(RegType(uint32_t(typeId)));
if (ASMJIT_UNLIKELY(!Type::isValid(typeId)))
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId)))
return DebugUtils::errored(kErrorInvalidTypeId);
// First normalize architecture dependent types.
if (Type::isAbstract(typeId)) {
if (TypeUtils::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch);
if (typeId == Type::kIdIntPtr)
typeId = is32Bit ? Type::kIdI32 : Type::kIdI64;
if (typeId == TypeId::kIntPtr)
typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64;
else
typeId = is32Bit ? Type::kIdU32 : Type::kIdU64;
typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64;
}
// Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero.
uint32_t size = Type::sizeOf(typeId);
uint32_t size = TypeUtils::sizeOf(typeId);
if (ASMJIT_UNLIKELY(!size))
return DebugUtils::errored(kErrorInvalidTypeId);
if (ASMJIT_UNLIKELY(typeId == Type::kIdF80))
if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80))
return DebugUtils::errored(kErrorInvalidUseOfF80);
uint32_t regType = 0;
if (typeId >= Type::_kIdBaseStart && typeId < Type::_kIdVec32Start) {
regType = archTraits._typeIdToRegType[typeId - Type::_kIdBaseStart];
if (!regType) {
if (typeId == Type::kIdI64 || typeId == Type::kIdU64)
RegType regType = RegType::kNone;
if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) {
regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)];
if (regType == RegType::kNone) {
if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64)
return DebugUtils::errored(kErrorInvalidUseOfGpq);
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
}
else {
if (size <= 8 && archTraits._regInfo[BaseReg::kTypeVec64].isValid())
regType = BaseReg::kTypeVec64;
else if (size <= 16 && archTraits._regInfo[BaseReg::kTypeVec128].isValid())
regType = BaseReg::kTypeVec128;
else if (size == 32 && archTraits._regInfo[BaseReg::kTypeVec256].isValid())
regType = BaseReg::kTypeVec256;
else if (archTraits._regInfo[BaseReg::kTypeVec512].isValid())
regType = BaseReg::kTypeVec512;
if (size <= 8 && archTraits._regSignature[RegType::kVec64].isValid())
regType = RegType::kVec64;
else if (size <= 16 && archTraits._regSignature[RegType::kVec128].isValid())
regType = RegType::kVec128;
else if (size == 32 && archTraits._regSignature[RegType::kVec256].isValid())
regType = RegType::kVec256;
else if (archTraits._regSignature[RegType::kVec512].isValid())
regType = RegType::kVec512;
else
return DebugUtils::errored(kErrorInvalidTypeId);
}
*typeIdOut = typeId;
regInfoOut->reset(archTraits.regTypeToSignature(regType));
*regSignatureOut = archTraits.regTypeToSignature(regType);
return kErrorOk;
}
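A hedged usage sketch of the rewritten mapping above; the code assumes the asmjit namespace and only illustrates the out-parameter contract.
TypeId normalizedTypeId;
OperandSignature regSignature;

Error err = ArchUtils::typeIdToRegSignature(Arch::kX64, TypeId::kIntPtr,
                                            &normalizedTypeId, &regSignature);
if (err == kErrorOk) {
  // On a 64-bit target kIntPtr normalizes to kInt64 and the signature
  // describes the GP register that can hold it.
  RegGroup group = regSignature.regGroup();
  (void)group;
}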

View File

@@ -1,31 +1,13 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#define ASMJIT_CORE_ARCHTRAITS_H_INCLUDED
#include "../core/environment.h"
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/type.h"
ASMJIT_BEGIN_NAMESPACE
@@ -33,8 +15,98 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
//! Instruction set architecture (ISA).
enum class Arch : uint8_t {
//! Unknown or uninitialized ISA.
kUnknown = 0,
//! 32-bit X86 ISA.
kX86 = 1,
//! 64-bit X86 ISA also known as X64, X86_64, and AMD64.
kX64 = 2,
//! 32-bit RISC-V ISA.
kRISCV32 = 3,
//! 64-bit RISC-V ISA.
kRISCV64 = 4,
//! 32-bit ARM ISA (little endian).
kARM = 5,
//! 64-bit ARM ISA (little endian).
kAArch64 = 6,
//! 32-bit ARM ISA in Thumb mode (little endian).
kThumb = 7,
// 8 is not used at the moment, even numbers are 64-bit architectures.
//! 32-bit MIPS ISA (little endian).
kMIPS32_LE = 9,
//! 64-bit MIPS ISA (little endian).
kMIPS64_LE = 10,
//! 32-bit ARM ISA (big endian).
kARM_BE = 11,
//! 64-bit ARM ISA (big endian).
kAArch64_BE = 12,
//! 32-bit ARM ISA in Thumb mode (big endian).
kThumb_BE = 13,
// 14 is not used at the moment, even numbers are 64-bit architectures.
//! 32-bit MIPS ISA (big endian).
kMIPS32_BE = 15,
//! 64-bit MIPS ISA (big endian).
kMIPS64_BE = 16,
//! Maximum value of `Arch`.
kMaxValue = kMIPS64_BE,
//! Mask used by 32-bit ISAs (odd are 32-bit, even are 64-bit).
k32BitMask = 0x01,
//! First big-endian architecture.
kBigEndian = kARM_BE,
//! ISA detected at compile-time (ISA of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
ASMJIT_ARCH_X86 == 32 ? kX86 :
ASMJIT_ARCH_X86 == 64 ? kX64 :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kARM :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kARM_BE :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kAArch64 :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kAArch64_BE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kMIPS64_BE :
kUnknown
#endif
};
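A short sketch of how the Arch constants above are typically consumed; the checks follow the documented encoding (odd values are 32-bit ISAs, values from kBigEndian up are big endian).
Arch arch = Arch::kHost;                                  // detected at compile time

bool is32Bit     = (uint32_t(arch) & uint32_t(Arch::k32BitMask)) != 0;
bool isBigEndian = arch >= Arch::kBigEndian;              // kARM_BE and above
(void)is32Bit;
(void)isBigEndian;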
//! Sub-architecture.
enum class SubArch : uint8_t {
//! Unknown or uninitialized architecture sub-type.
kUnknown = 0,
//! Maximum value of `SubArch`.
kMaxValue = kUnknown,
//! Sub-architecture detected at compile-time (sub-architecture of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
kUnknown
#endif
};
//! Identifier used to represent names of different data types across architectures.
enum class ISAWordNameId : uint8_t {
enum class ArchTypeNameId : uint8_t {
//! Describes 'db' (X86/X86_64 convention, always 8-bit quantity).
kDB = 0,
//! Describes 'dw' (X86/X86_64 convention, always 16-bit word).
@@ -64,23 +136,33 @@ enum class ISAWordNameId : uint8_t {
//! Describes 'quad' (64-bit word).
kQuad,
//! Maximum value.
//! Maximum value of `ArchTypeNameId`.
kMaxValue = kQuad
};
// ============================================================================
// [asmjit::ArchTraits]
// ============================================================================
//! Instruction feature hints for each register group provided by \ref ArchTraits.
//!
//! Instruction feature hints describe miscellaneous instructions provided by the architecture that can be used
//! by the register allocator to simplify certain operations, such as register swaps or emitting register
//! push/pop sequences.
//!
//! \remarks Instruction feature hints are only defined for register groups that can be used with the \ref
//! asmjit_compiler infrastructure. Register groups that are not managed by Compiler are not provided by
//! \ref ArchTraits and cannot be queried.
enum class InstHints : uint8_t {
//! No feature hints.
kNoHints = 0,
//! Architecture supports a register swap by using a single instruction.
kRegSwap = 0x01u,
//! Architecture provides push/pop instructions.
kPushPop = 0x02u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstHints)
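Because InstHints uses ASMJIT_DEFINE_ENUM_FLAGS, the hints compose like bit flags; a minimal sketch (Support::test is the same helper ArchTraits uses internally):
InstHints hints = InstHints::kRegSwap | InstHints::kPushPop;

bool canSwap = Support::test(hints, InstHints::kRegSwap);   // true
hints &= ~InstHints::kPushPop;                              // clear one hint
(void)canSwap;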
//! Architecture traits used by Function API and Compiler's register allocator.
struct ArchTraits {
//! ISA features for each register group.
enum IsaFeatures : uint32_t {
//! ISA features a register swap by using a single instruction.
kIsaFeatureSwap = 0x01u,
//! ISA features a push/pop like instruction for this register group.
kIsaFeaturePushPop = 0x02u,
};
//! \name Members
//! \{
//! Stack pointer register id.
uint8_t _spRegId;
@@ -101,84 +183,69 @@ struct ArchTraits {
//! Maximum addressable offset on stack depending on specific instruction.
uint32_t _maxStackOffset;
//! Flags for each virtual register group (always covers GP and Vec groups).
uint8_t _isaFlags[BaseReg::kGroupVirt];
//! Flags for each virtual register group.
Support::Array<InstHints, Globals::kNumVirtGroups> _instHints;
//! Maps register type into a signature, that provides group, size and can
//! be used to construct register operands.
RegInfo _regInfo[BaseReg::kTypeMax + 1];
//! Maps a register to type-id, see \ref Type::Id.
uint8_t _regTypeToTypeId[BaseReg::kTypeMax + 1];
//! Maps base TypeId values (from TypeId::_kIdBaseStart) to register types, see \ref Type::Id.
uint8_t _typeIdToRegType[32];
//! Maps a register type into a signature that provides its group and size, and can be used to construct register operands.
Support::Array<OperandSignature, uint32_t(RegType::kMaxValue) + 1> _regSignature;
//! Maps a register to type-id, see \ref TypeId.
Support::Array<TypeId, uint32_t(RegType::kMaxValue) + 1> _regTypeToTypeId;
//! Maps scalar TypeId values (from TypeId::_kBaseStart) to register types, see \ref TypeId.
Support::Array<RegType, 32> _typeIdToRegType;
//! Word name identifiers of 8-bit, 16-bit, 32-bit, and 64-bit quantities that appear in formatted text.
ISAWordNameId _isaWordNameIdTable[4];
ArchTypeNameId _typeNameIdTable[4];
//! Resets all members to zeros.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
//! \}
//! \name Accessors
//! \{
//! Returns stack pointer register id.
inline constexpr uint32_t spRegId() const noexcept { return _spRegId; }
inline uint32_t spRegId() const noexcept { return _spRegId; }
//! Returns stack frame register id.
inline constexpr uint32_t fpRegId() const noexcept { return _fpRegId; }
inline uint32_t fpRegId() const noexcept { return _fpRegId; }
//! Returns link register id, if the architecture provides it.
inline constexpr uint32_t linkRegId() const noexcept { return _linkRegId; }
inline uint32_t linkRegId() const noexcept { return _linkRegId; }
//! Returns instruction pointer register id, if the architecture provides it.
inline constexpr uint32_t ipRegId() const noexcept { return _ipRegId; }
inline uint32_t ipRegId() const noexcept { return _ipRegId; }
//! Returns a hardware stack alignment requirement.
//!
//! \note This is a hardware constraint. Architectures that don't constrain
//! it would return the lowest alignment (1), however, some architectures may
//! constrain the alignment, for example AArch64 requires 16-byte alignment.
inline constexpr uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! \note This is a hardware constraint. Architectures that don't constrain it would return the lowest alignment
//! (1), however, some architectures may constrain the alignment, for example AArch64 requires 16-byte alignment.
inline uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
//! Tests whether the architecture provides link register, which is used across
//! function calls. If the link register is not provided then a function call
//! pushes the return address on stack (X86/X64).
inline constexpr bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
//! Tests whether the architecture provides a link register, which is used across function calls. If the link
//! register is not provided then a function call pushes the return address on the stack (X86/X64).
inline bool hasLinkReg() const noexcept { return _linkRegId != BaseReg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions.
inline constexpr uint32_t minStackOffset() const noexcept { return _minStackOffset; }
inline uint32_t minStackOffset() const noexcept { return _minStackOffset; }
//! Returns maximum addressable offset on stack depending on specific instruction.
inline constexpr uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
inline uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
//! Returns ISA flags of the given register `group`.
inline constexpr uint32_t isaFlags(uint32_t group) const noexcept { return _isaFlags[group]; }
inline InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; }
//! Tests whether the given register `group` has the given `flag` set.
inline constexpr bool hasIsaFlag(uint32_t group, uint32_t flag) const noexcept { return (_isaFlags[group] & flag) != 0; }
inline bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); }
//! Tests whether the ISA provides register swap instruction for the given register `group`.
inline constexpr bool hasSwap(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeatureSwap); }
inline bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`.
inline constexpr bool hasPushPop(uint32_t group) const noexcept { return hasIsaFlag(group, kIsaFeaturePushPop); }
inline bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); }
inline uint32_t hasRegType(uint32_t rType) const noexcept {
return rType <= BaseReg::kTypeMax && _regInfo[rType].signature() != 0;
inline bool hasRegType(RegType type) const noexcept {
return type <= RegType::kMaxValue && _regSignature[type].isValid();
}
inline uint32_t regTypeToSignature(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].signature();
}
inline uint32_t regTypeToGroup(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].group();
}
inline uint32_t regTypeToSize(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regInfo[rType].size();
}
inline uint32_t regTypeToTypeId(uint32_t rType) const noexcept {
ASMJIT_ASSERT(rType <= BaseReg::kTypeMax);
return _regTypeToTypeId[rType];
}
//! Returns an operand signature from the given register `type` of this architecture.
inline OperandSignature regTypeToSignature(RegType type) const noexcept { return _regSignature[type]; }
//! Returns a register group of the given register `type` of this architecture.
inline RegGroup regTypeToGroup(RegType type) const noexcept { return _regSignature[type].regGroup(); }
//! Returns a register size of the given register `type` of this architecture.
inline uint32_t regTypeToSize(RegType type) const noexcept { return _regSignature[type].size(); }
//! Returns a corresponding `TypeId` from the given register `type` of this architecture.
inline TypeId regTypeToTypeId(RegType type) const noexcept { return _regTypeToTypeId[type]; }
//! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent.
//!
@@ -187,10 +254,10 @@ struct ArchTraits {
//! - [1] 16-bits
//! - [2] 32-bits
//! - [3] 64-bits
inline const ISAWordNameId* isaWordNameIdTable() const noexcept { return _isaWordNameIdTable; }
inline const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; }
//! Returns an ISA word name identifier of the given `index`, see \ref isaWordNameIdTable() for more details.
inline ISAWordNameId isaWordNameId(uint32_t index) const noexcept { return _isaWordNameIdTable[index]; }
//! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details.
inline ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; }
//! \}
@@ -198,23 +265,21 @@ struct ArchTraits {
//! \{
//! Returns a const reference to `ArchTraits` for the given architecture `arch`.
static inline const ArchTraits& byArch(uint32_t arch) noexcept;
static inline const ArchTraits& byArch(Arch arch) noexcept;
//! \}
};
ASMJIT_VARAPI const ArchTraits _archTraits[Environment::kArchCount];
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1];
inline const ArchTraits& ArchTraits::byArch(uint32_t arch) noexcept { return _archTraits[arch & ~Environment::kArchBigEndianMask]; }
// ============================================================================
// [asmjit::ArchUtils]
// ============================================================================
//! \cond
inline const ArchTraits& ArchTraits::byArch(Arch arch) noexcept { return _archTraits[uint32_t(arch)]; }
//! \endcond
//! Architecture utilities.
namespace ArchUtils {
ASMJIT_API Error typeIdToRegInfo(uint32_t arch, uint32_t typeId, uint32_t* typeIdOut, RegInfo* regInfo) noexcept;
ASMJIT_API Error typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept;
} // {ArchUtils}
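A usage sketch of the strong-typed ArchTraits accessors declared above; the concrete results depend on the queried Arch and are only illustrative.
const ArchTraits& traits = ArchTraits::byArch(Arch::kX64);

bool gpPushPop = traits.hasInstPushPop(RegGroup::kGp);               // push/pop sequences available
OperandSignature vecSig = traits.regTypeToSignature(RegType::kVec128);
TypeId gp64Type = traits.regTypeToTypeId(RegType::kGp64);
(void)gpPushPop;
(void)vecSig;
(void)gp64Type;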

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/assembler.h"
@@ -32,18 +14,16 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseAssembler - Construction / Destruction]
// ============================================================================
// BaseAssembler - Construction & Destruction
// ==========================================
BaseAssembler::BaseAssembler() noexcept
: BaseEmitter(kTypeAssembler) {}
: BaseEmitter(EmitterType::kAssembler) {}
BaseAssembler::~BaseAssembler() noexcept {}
// ============================================================================
// [asmjit::BaseAssembler - Buffer Management]
// ============================================================================
// BaseAssembler - Buffer Management
// =================================
Error BaseAssembler::setOffset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code))
@@ -57,9 +37,8 @@ Error BaseAssembler::setOffset(size_t offset) {
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Section Management]
// ============================================================================
// BaseAssembler - Section Management
// ==================================
static void BaseAssembler_initSection(BaseAssembler* self, Section* section) noexcept {
uint8_t* p = section->_buffer._data;
@@ -86,9 +65,8 @@ Error BaseAssembler::section(Section* section) {
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Label Management]
// ============================================================================
// BaseAssembler - Label Management
// ================================
Label BaseAssembler::newLabel() {
uint32_t labelId = Globals::kInvalidId;
@@ -103,7 +81,7 @@ Label BaseAssembler::newLabel() {
return Label(labelId);
}
Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
if (ASMJIT_LIKELY(_code)) {
LabelEntry* le;
@@ -134,9 +112,8 @@ Error BaseAssembler::bind(const Label& label) {
return kErrorOk;
}
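An illustrative flow of the label API with the strong-typed LabelType; the assembler `a` is assumed to be an attached architecture-specific assembler.
Label entry = a.newNamedLabel("entry", SIZE_MAX, LabelType::kGlobal);
Label skip  = a.newLabel();

a.bind(entry);
// ... emit instructions ...
a.bind(skip);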
// ============================================================================
// [asmjit::BaseAssembler - Embed]
// ============================================================================
// BaseAssembler - Embed
// =====================
Error BaseAssembler::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
@@ -154,7 +131,7 @@ Error BaseAssembler::embed(const void* data, size_t dataSize) {
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), Type::kIdU8, data, dataSize, 1);
Formatter::formatData(sb, _logger->flags(), arch(), TypeId::kUInt8, data, dataSize, 1);
sb.append('\n');
_logger->log(sb);
}
@@ -163,17 +140,17 @@ Error BaseAssembler::embed(const void* data, size_t dataSize) {
return kErrorOk;
}
Error BaseAssembler::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (itemCount == 0 || repeatCount == 0)
return kErrorOk;
uint32_t typeSize = Type::sizeOf(finalTypeId);
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
@@ -203,16 +180,16 @@ Error BaseAssembler::embedDataArray(uint32_t typeId, const void* data, size_t it
}
#ifndef ASMJIT_NO_LOGGING
static const uint8_t dataTypeIdBySize[9] = {
Type::kIdVoid, // [0] (invalid)
Type::kIdU8, // [1] (uint8_t)
Type::kIdU16, // [2] (uint16_t)
Type::kIdVoid, // [3] (invalid)
Type::kIdU32, // [4] (uint32_t)
Type::kIdVoid, // [5] (invalid)
Type::kIdVoid, // [6] (invalid)
Type::kIdVoid, // [7] (invalid)
Type::kIdU64 // [8] (uint64_t)
static const TypeId dataTypeIdBySize[9] = {
TypeId::kVoid, // [0] (invalid)
TypeId::kUInt8, // [1] (uint8_t)
TypeId::kUInt16, // [2] (uint16_t)
TypeId::kVoid, // [3] (invalid)
TypeId::kUInt32, // [4] (uint32_t)
TypeId::kVoid, // [5] (invalid)
TypeId::kVoid, // [6] (invalid)
TypeId::kVoid, // [7] (invalid)
TypeId::kUInt64 // [8] (uint64_t)
};
#endif
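A hedged example of the TypeId-based embed API handled above; `a` is an attached assembler and the table is made up for illustration.
static const uint32_t table[4] = { 1, 2, 4, 8 };

a.embedDataArray(TypeId::kUInt32, table, 4);      // embeds 16 bytes (4 x uint32_t)
a.embedDataArray(TypeId::kUInt8, "", 1, 16);      // embeds 16 zero bytes (1 item repeated 16x)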
@@ -223,7 +200,7 @@ Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!isLabelValid(label)))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
size_t size = pool.size();
@@ -282,13 +259,13 @@ Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
sb.append(' ');
Formatter::formatLabel(sb, 0, this, label.id());
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
sb.append('\n');
_logger->log(sb);
}
#endif
Error err = _code->newRelocEntry(&re, RelocEntry::kTypeRelToAbs);
Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
@@ -343,9 +320,9 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
sb.append(" (");
Formatter::formatLabel(sb, 0, this, label.id());
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
sb.append(" - ");
Formatter::formatLabel(sb, 0, this, base.id());
Formatter::formatLabel(sb, FormatFlags::kNone, this, base.id());
sb.append(")\n");
_logger->log(sb);
}
@@ -358,7 +335,7 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
}
else {
RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocEntry::kTypeExpression);
Error err = _code->newRelocEntry(&re, RelocType::kExpression);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
@@ -367,7 +344,7 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
return reportError(DebugUtils::errored(kErrorOutOfMemory));
exp->reset();
exp->opType = Expression::kOpSub;
exp->opType = ExpressionOpType::kSub;
exp->setValueAsLabel(0, labelEntry);
exp->setValueAsLabel(1, baseEntry);
@@ -383,19 +360,18 @@ Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseAssembler - Comment]
// ============================================================================
// BaseAssembler - Comment
// =======================
Error BaseAssembler::comment(const char* data, size_t size) {
if (!hasEmitterFlag(kFlagLogComments)) {
if (!hasEmitterFlag(kFlagAttached))
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
#ifndef ASMJIT_NO_LOGGING
// Logger cannot be NULL if `kFlagLogComments` is set.
// Logger cannot be NULL if `EmitterFlags::kLogComments` is set.
ASMJIT_ASSERT(_logger != nullptr);
_logger->log(data, size);
@@ -407,9 +383,8 @@ Error BaseAssembler::comment(const char* data, size_t size) {
#endif
}
// ============================================================================
// [asmjit::BaseAssembler - Events]
// ============================================================================
// BaseAssembler - Events
// ======================
Error BaseAssembler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));

View File

@@ -1,31 +1,12 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#define ASMJIT_CORE_ASSEMBLER_H_INCLUDED
#include "../core/codeholder.h"
#include "../core/datatypes.h"
#include "../core/emitter.h"
#include "../core/operand.h"
@@ -34,10 +15,6 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_assembler
//! \{
// ============================================================================
// [asmjit::BaseAssembler]
// ============================================================================
//! Base assembler.
//!
//! This is a base class that provides interface used by architecture specific
@@ -112,7 +89,7 @@ public:
//! \{
ASMJIT_API Label newLabel() override;
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override;
//! \}
@@ -121,7 +98,7 @@ public:
//! \{
ASMJIT_API Error embed(const void* data, size_t dataSize) override;
ASMJIT_API Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
ASMJIT_API Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_BUILDER
@@ -33,9 +15,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::PostponedErrorHandler (Internal)]
// ============================================================================
// PostponedErrorHandler (Internal)
// ================================
//! Postponed error handler that never throws. Used as a temporary error handler
//! to run passes. If an error occurs, the caller is notified and will call the
@@ -50,9 +31,8 @@ public:
StringTmp<128> _message;
};
// ============================================================================
// [asmjit::BaseBuilder - Utilities]
// ============================================================================
// BaseBuilder - Utilities
// =======================
static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
for (Pass* pass : self->_passes)
@@ -60,12 +40,11 @@ static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
self->_passes.reset();
}
// ============================================================================
// [asmjit::BaseBuilder - Construction / Destruction]
// ============================================================================
// BaseBuilder - Construction & Destruction
// ========================================
BaseBuilder::BaseBuilder() noexcept
: BaseEmitter(kTypeBuilder),
: BaseEmitter(EmitterType::kBuilder),
_codeZone(32768 - Zone::kBlockOverhead),
_dataZone(16384 - Zone::kBlockOverhead),
_passZone(65536 - Zone::kBlockOverhead),
@@ -75,11 +54,10 @@ BaseBuilder::~BaseBuilder() noexcept {
BaseBuilder_deletePasses(this);
}
// ============================================================================
// [asmjit::BaseBuilder - Node Management]
// ============================================================================
// BaseBuilder - Node Management
// =============================
Error BaseBuilder::_newInstNode(InstNode** out, uint32_t instId, uint32_t instOptions, uint32_t opCount) {
Error BaseBuilder::newInstNode(InstNode** out, InstId instId, InstOptions instOptions, uint32_t opCount) {
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
ASMJIT_ASSERT(opCapacity >= InstNode::kBaseOpCapacity);
@@ -92,28 +70,28 @@ Error BaseBuilder::_newInstNode(InstNode** out, uint32_t instId, uint32_t instOp
}
Error BaseBuilder::_newLabelNode(LabelNode** out) {
Error BaseBuilder::newLabelNode(LabelNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<LabelNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::_newAlignNode(AlignNode** out, uint32_t alignMode, uint32_t alignment) {
Error BaseBuilder::newAlignNode(AlignNode** out, AlignMode alignMode, uint32_t alignment) {
*out = nullptr;
return _newNodeT<AlignNode>(out, alignMode, alignment);
}
Error BaseBuilder::_newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) {
Error BaseBuilder::newEmbedDataNode(EmbedDataNode** out, TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
*out = nullptr;
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize());
uint32_t finalTypeId = Type::deabstract(typeId, deabstractDelta);
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
if (ASMJIT_UNLIKELY(!Type::isValid(finalTypeId)))
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId)))
return reportError(DebugUtils::errored(kErrorInvalidArgument));
uint32_t typeSize = Type::sizeOf(finalTypeId);
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
@@ -123,7 +101,7 @@ Error BaseBuilder::_newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newNodeT<EmbedDataNode>(&node));
node->_embed._typeId = uint8_t(typeId);
node->_embed._typeId = typeId;
node->_embed._typeSize = uint8_t(typeSize);
node->_itemCount = itemCount;
node->_repeatCount = repeatCount;
@@ -143,14 +121,14 @@ Error BaseBuilder::_newEmbedDataNode(EmbedDataNode** out, uint32_t typeId, const
return kErrorOk;
}
Error BaseBuilder::_newConstPoolNode(ConstPoolNode** out) {
Error BaseBuilder::newConstPoolNode(ConstPoolNode** out) {
*out = nullptr;
ASMJIT_PROPAGATE(_newNodeT<ConstPoolNode>(out));
return registerLabelNode(*out);
}
Error BaseBuilder::_newCommentNode(CommentNode** out, const char* data, size_t size) {
Error BaseBuilder::newCommentNode(CommentNode** out, const char* data, size_t size) {
*out = nullptr;
if (data) {
@@ -198,7 +176,7 @@ BaseNode* BaseBuilder::addNode(BaseNode* node) noexcept {
_lastNode = node;
}
node->addFlags(BaseNode::kFlagIsActive);
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
@@ -219,7 +197,7 @@ BaseNode* BaseBuilder::addAfter(BaseNode* node, BaseNode* ref) noexcept {
node->_prev = prev;
node->_next = next;
node->addFlags(BaseNode::kFlagIsActive);
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
@@ -246,7 +224,7 @@ BaseNode* BaseBuilder::addBefore(BaseNode* node, BaseNode* ref) noexcept {
node->_prev = prev;
node->_next = next;
node->addFlags(BaseNode::kFlagIsActive);
node->addFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
@@ -278,7 +256,7 @@ BaseNode* BaseBuilder::removeNode(BaseNode* node) noexcept {
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(BaseNode::kFlagIsActive);
node->clearFlags(NodeFlags::kIsActive);
if (node->isSection())
_dirtySectionLinks = true;
@@ -319,7 +297,7 @@ void BaseBuilder::removeNodes(BaseNode* first, BaseNode* last) noexcept {
node->_prev = nullptr;
node->_next = nullptr;
node->clearFlags(BaseNode::kFlagIsActive);
node->clearFlags(NodeFlags::kIsActive);
didRemoveSection |= uint32_t(node->isSection());
if (_cursor == node)
@@ -340,9 +318,8 @@ BaseNode* BaseBuilder::setCursor(BaseNode* node) noexcept {
return old;
}
// ============================================================================
// [asmjit::BaseBuilder - Section]
// ============================================================================
// BaseBuilder - Sections
// ======================
Error BaseBuilder::sectionNodeOf(SectionNode** out, uint32_t sectionId) {
*out = nullptr;
@@ -424,9 +401,8 @@ void BaseBuilder::updateSectionLinks() noexcept {
_dirtySectionLinks = false;
}
// ============================================================================
// [asmjit::BaseBuilder - Labels]
// ============================================================================
// BaseBuilder - Labels
// ====================
Error BaseBuilder::labelNodeOf(LabelNode** out, uint32_t labelId) {
*out = nullptr;
@@ -500,7 +476,7 @@ Label BaseBuilder::newLabel() {
return Label(labelId);
}
Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, uint32_t type, uint32_t parentId) {
Label BaseBuilder::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
uint32_t labelId = Globals::kInvalidId;
LabelEntry* le;
@@ -521,9 +497,8 @@ Error BaseBuilder::bind(const Label& label) {
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Passes]
// ============================================================================
// BaseBuilder - Passes
// ====================
ASMJIT_FAVOR_SIZE Pass* BaseBuilder::passByName(const char* name) const noexcept {
for (Pass* pass : _passes)
@@ -603,21 +578,20 @@ Error BaseBuilder::runPasses() {
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Emit]
// ============================================================================
// BaseBuilder - Emit
// ==================
Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
Error BaseBuilder::_emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
uint32_t opCount = EmitterUtils::opCountFromEmitArgs(o0, o1, o2, opExt);
uint32_t options = instOptions() | forcedInstOptions();
InstOptions options = instOptions() | forcedInstOptions();
if (options & BaseInst::kOptionReserved) {
if (Support::test(options, InstOptions::kReserved)) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
#ifndef ASMJIT_NO_VALIDATION
// Strict validation.
if (hasValidationOption(kValidationOptionIntermediate)) {
if (hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate)) {
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
@@ -631,8 +605,8 @@ Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1
}
#endif
// Clear options that should never be part of `InstNode`.
options &= ~BaseInst::kOptionReserved;
// Clear instruction options that should never be part of a regular instruction.
options &= ~InstOptions::kReserved;
}
uint32_t opCapacity = InstNode::capacityOfOpCount(opCount);
@@ -666,42 +640,40 @@ Error BaseBuilder::_emit(uint32_t instId, const Operand_& o0, const Operand_& o1
return kErrorOk;
}
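A sketch of enabling the renamed diagnostic options that _emit() checks above; addDiagnosticOptions is assumed to be the BaseEmitter setter introduced by this refactor, and `cb` stands for any attached Builder-based emitter.
cb.addDiagnosticOptions(DiagnosticOptions::kValidateIntermediate);

// From now on, every instruction recorded through _emit() is validated
// before its InstNode is appended to the node list.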
// ============================================================================
// [asmjit::BaseBuilder - Align]
// ============================================================================
// BaseBuilder - Align
// ===================
Error BaseBuilder::align(uint32_t alignMode, uint32_t alignment) {
Error BaseBuilder::align(AlignMode alignMode, uint32_t alignment) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
AlignNode* node;
ASMJIT_PROPAGATE(_newAlignNode(&node, alignMode, alignment));
ASMJIT_PROPAGATE(newAlignNode(&node, alignMode, alignment));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Embed]
// ============================================================================
// BaseBuilder - Embed
// ===================
Error BaseBuilder::embed(const void* data, size_t dataSize) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, data, dataSize));
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, data, dataSize));
addNode(node);
return kErrorOk;
}
Error BaseBuilder::embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t itemRepeat) {
Error BaseBuilder::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t itemRepeat) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
ASMJIT_PROPAGATE(newEmbedDataNode(&node, typeId, data, itemCount, itemRepeat));
addNode(node);
return kErrorOk;
@@ -714,23 +686,22 @@ Error BaseBuilder::embedConstPool(const Label& label, const ConstPool& pool) {
if (!isLabelValid(label))
return reportError(DebugUtils::errored(kErrorInvalidLabel));
ASMJIT_PROPAGATE(align(kAlignData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
ASMJIT_PROPAGATE(bind(label));
EmbedDataNode* node;
ASMJIT_PROPAGATE(_newEmbedDataNode(&node, Type::kIdU8, nullptr, pool.size()));
ASMJIT_PROPAGATE(newEmbedDataNode(&node, TypeId::kUInt8, nullptr, pool.size()));
pool.fill(node->data());
addNode(node);
return kErrorOk;
}
// EmbedLabel / EmbedLabelDelta
// ----------------------------
// BaseBuilder - EmbedLabel & EmbedLabelDelta
// ==========================================
//
// If dataSize is zero it means that the size is the same as target register
// width, however, if it's provided we really want to validate whether it's
// within the possible range.
// If dataSize is zero it means that the size is the same as target register width, however,
// if it's provided we really want to validate whether it's within the possible range.
static inline bool BaseBuilder_checkDataSize(size_t dataSize) noexcept {
return !dataSize || (Support::isPowerOf2(dataSize) && dataSize <= 8);
@@ -764,24 +735,22 @@ Error BaseBuilder::embedLabelDelta(const Label& label, const Label& base, size_t
return kErrorOk;
}
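A hedged usage sketch of the typed embed API shown above, assuming a constructed BaseBuilder-derived emitter `b` and two already created labels `L` and `Base`; the argument values are illustrative:
uint32_t table[4] = { 1, 2, 3, 4 };
b.embed(table, sizeof(table));                   // raw bytes
b.embedDataArray(TypeId::kUInt32, table, 4, 1);  // 4 items of TypeId::kUInt32, emitted once
b.embedLabel(L);                                 // label address, register-width by default
b.embedLabelDelta(L, Base, 4);                   // 32-bit delta between two labels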
// ============================================================================
// [asmjit::BaseBuilder - Comment]
// ============================================================================
// BaseBuilder - Comment
// =====================
Error BaseBuilder::comment(const char* data, size_t size) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
CommentNode* node;
ASMJIT_PROPAGATE(_newCommentNode(&node, data, size));
ASMJIT_PROPAGATE(newCommentNode(&node, data, size));
addNode(node);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseBuilder - Serialize]
// ============================================================================
// BaseBuilder - SerializeTo
// =========================
Error BaseBuilder::serializeTo(BaseEmitter* dst) {
Error err = kErrorOk;
@@ -796,7 +765,7 @@ Error BaseBuilder::serializeTo(BaseEmitter* dst) {
InstNode* node = node_->as<InstNode>();
// NOTE: Inlined to remove one additional call per instruction.
dst->setInstOptions(node->instOptions());
dst->setInstOptions(node->options());
dst->setExtraReg(node->extraReg());
const Operand_* op = node->operands();
@@ -862,9 +831,8 @@ Error BaseBuilder::serializeTo(BaseEmitter* dst) {
return err;
}
// ============================================================================
// [asmjit::BaseBuilder - Events]
// ============================================================================
// BaseBuilder - Events
// ====================
Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
@@ -883,7 +851,7 @@ Error BaseBuilder::onAttach(CodeHolder* code) noexcept {
_cursor = initialSection;
_firstNode = initialSection;
_lastNode = initialSection;
initialSection->setFlags(BaseNode::kFlagIsActive);
initialSection->setFlags(NodeFlags::kIsActive);
return kErrorOk;
}
@@ -898,8 +866,7 @@ Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
_dataZone.reset();
_passZone.reset();
_nodeFlags = 0;
_nodeFlags = NodeFlags::kNone;
_cursor = nullptr;
_firstNode = nullptr;
_lastNode = nullptr;
@@ -907,9 +874,8 @@ Error BaseBuilder::onDetach(CodeHolder* code) noexcept {
return Base::onDetach(code);
}
// ============================================================================
// [asmjit::Pass - Construction / Destruction]
// ============================================================================
// Pass - Construction & Destruction
// =================================
Pass::Pass(const char* name) noexcept
: _name(name) {}

File diff suppressed because it is too large


View File

@@ -1,42 +1,35 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFER_H_INCLUDED
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CodeBuffer]
// ============================================================================
//! Flags used by \ref CodeBuffer.
enum class CodeBufferFlags : uint32_t {
//! No flags.
kNone = 0,
//! Buffer is external (not allocated by asmjit).
kIsExternal = 0x00000001u,
//! Buffer is fixed (cannot be reallocated).
kIsFixed = 0x00000002u
};
ASMJIT_DEFINE_ENUM_FLAGS(CodeBufferFlags)
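ASMJIT_DEFINE_ENUM_FLAGS gives the `enum class` the bitwise operators a plain enum had implicitly. A self-contained illustration of the kind of operators such a macro is expected to provide (not its literal expansion; DemoBufferFlags is a hypothetical stand-in):
#include <cstdint>
enum class DemoBufferFlags : uint32_t {
  kNone       = 0,
  kIsExternal = 0x00000001u,
  kIsFixed    = 0x00000002u
};
static inline DemoBufferFlags operator|(DemoBufferFlags a, DemoBufferFlags b) noexcept {
  return DemoBufferFlags(uint32_t(a) | uint32_t(b));
}
static inline DemoBufferFlags operator&(DemoBufferFlags a, DemoBufferFlags b) noexcept {
  return DemoBufferFlags(uint32_t(a) & uint32_t(b));
}
// DemoBufferFlags f = DemoBufferFlags::kIsExternal | DemoBufferFlags::kIsFixed;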
//! Code or data buffer.
struct CodeBuffer {
//! \name Members
//! \{
//! The content of the buffer (data).
uint8_t* _data;
//! Number of bytes of `data` used.
@@ -44,15 +37,9 @@ struct CodeBuffer {
//! Buffer capacity (in bytes).
size_t _capacity;
//! Buffer flags.
uint32_t _flags;
CodeBufferFlags _flags;
//! Code buffer flags.
enum Flags : uint32_t {
//! Buffer is external (not allocated by asmjit).
kFlagIsExternal = 0x00000001u,
//! Buffer is fixed (cannot be reallocated).
kFlagIsFixed = 0x00000002u
};
//! \}
//! \name Overloaded Operators
//! \{
@@ -73,20 +60,20 @@ struct CodeBuffer {
//! \name Accessors
//! \{
//! Returns code buffer flags, see \ref Flags.
inline uint32_t flags() const noexcept { return _flags; }
//! Returns code buffer flags.
inline CodeBufferFlags flags() const noexcept { return _flags; }
//! Tests whether the code buffer has the given `flag` set.
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
inline bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Tests whether this code buffer has a fixed size.
//!
//! Fixed size means that the code buffer is fixed and cannot grow.
inline bool isFixed() const noexcept { return hasFlag(kFlagIsFixed); }
inline bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); }
//! Tests whether the data in this code buffer is external.
//!
//! External data can only be provided by users, it's never used by AsmJit.
inline bool isExternal() const noexcept { return hasFlag(kFlagIsExternal); }
inline bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null).
inline bool isAllocated() const noexcept { return _data != nullptr; }
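A short usage sketch of the typed accessors above, assuming an initialized CodeHolder `code` whose .text section already exists:
const CodeBuffer& buf = code.textSection()->buffer();
bool external = buf.hasFlag(CodeBufferFlags::kIsExternal); // memory supplied by the user
bool fixed    = buf.isFixed();                             // shorthand for hasFlag(CodeBufferFlags::kIsFixed)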

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/assembler.h"
@@ -32,9 +14,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [Globals]
// ============================================================================
// Globals
// =======
static const char CodeHolder_addrTabName[] = ".addrtab";
@@ -43,31 +24,30 @@ static inline uint32_t x86EncodeMod(uint32_t m, uint32_t o, uint32_t rm) noexcep
return (m << 6) | (o << 3) | rm;
}
// ============================================================================
// [asmjit::LabelLinkIterator]
// ============================================================================
// LabelLinkIterator
// =================
class LabelLinkIterator {
public:
ASMJIT_INLINE LabelLinkIterator(LabelEntry* le) noexcept { reset(le); }
inline LabelLinkIterator(LabelEntry* le) noexcept { reset(le); }
ASMJIT_INLINE explicit operator bool() const noexcept { return isValid(); }
ASMJIT_INLINE bool isValid() const noexcept { return _link != nullptr; }
inline explicit operator bool() const noexcept { return isValid(); }
inline bool isValid() const noexcept { return _link != nullptr; }
ASMJIT_INLINE LabelLink* link() const noexcept { return _link; }
ASMJIT_INLINE LabelLink* operator->() const noexcept { return _link; }
inline LabelLink* link() const noexcept { return _link; }
inline LabelLink* operator->() const noexcept { return _link; }
ASMJIT_INLINE void reset(LabelEntry* le) noexcept {
inline void reset(LabelEntry* le) noexcept {
_pPrev = &le->_links;
_link = *_pPrev;
}
ASMJIT_INLINE void next() noexcept {
inline void next() noexcept {
_pPrev = &_link->next;
_link = *_pPrev;
}
ASMJIT_INLINE void resolveAndNext(CodeHolder* code) noexcept {
inline void resolveAndNext(CodeHolder* code) noexcept {
LabelLink* linkToDelete = _link;
_link = _link->next;
@@ -81,11 +61,10 @@ public:
LabelLink* _link;
};
// ============================================================================
// [asmjit::CodeHolder - Utilities]
// ============================================================================
// CodeHolder - Utilities
// ======================
static void CodeHolder_resetInternal(CodeHolder* self, uint32_t resetPolicy) noexcept {
static void CodeHolder_resetInternal(CodeHolder* self, ResetPolicy resetPolicy) noexcept {
uint32_t i;
const ZoneVector<BaseEmitter*>& emitters = self->emitters();
@@ -134,27 +113,25 @@ static void CodeHolder_onSettingsUpdated(CodeHolder* self) noexcept {
}
}
// ============================================================================
// [asmjit::CodeHolder - Construction / Destruction]
// ============================================================================
// CodeHolder - Construction & Destruction
// =======================================
CodeHolder::CodeHolder() noexcept
CodeHolder::CodeHolder(const Support::Temporary* temporary) noexcept
: _environment(),
_baseAddress(Globals::kNoBaseAddress),
_logger(nullptr),
_errorHandler(nullptr),
_zone(16384 - Zone::kBlockOverhead),
_zone(16384 - Zone::kBlockOverhead, 1, temporary),
_allocator(&_zone),
_unresolvedLinkCount(0),
_addressTableSection(nullptr) {}
CodeHolder::~CodeHolder() noexcept {
CodeHolder_resetInternal(this, Globals::kResetHard);
CodeHolder_resetInternal(this, ResetPolicy::kHard);
}
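The new constructor parameter lets the holder's zone start in caller-provided storage. A hedged sketch, assuming Support::Temporary wraps a (pointer, size) pair:
alignas(16) static uint8_t storage[16384];              // the first zone block is carved from here
Support::Temporary temporary(storage, sizeof(storage));
CodeHolder code(&temporary);                            // allocates from `storage` first, then falls back to the heap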
// ============================================================================
// [asmjit::CodeHolder - Init / Reset]
// ============================================================================
// CodeHolder - Init & Reset
// =========================
inline void CodeHolder_setSectionDefaultName(
Section* section,
@@ -179,7 +156,7 @@ Error CodeHolder::init(const Environment& environment, uint64_t baseAddress) noe
if (err == kErrorOk) {
Section* section = _allocator.allocZeroedT<Section>();
if (ASMJIT_LIKELY(section)) {
section->_flags = Section::kFlagExec | Section::kFlagConst;
section->_flags = SectionFlags::kExecutable | SectionFlags::kReadOnly;
CodeHolder_setSectionDefaultName(section, '.', 't', 'e', 'x', 't');
_sections.appendUnsafe(section);
_sectionsByOrder.appendUnsafe(section);
@@ -200,13 +177,12 @@ Error CodeHolder::init(const Environment& environment, uint64_t baseAddress) noe
}
}
void CodeHolder::reset(uint32_t resetPolicy) noexcept {
void CodeHolder::reset(ResetPolicy resetPolicy) noexcept {
CodeHolder_resetInternal(this, resetPolicy);
}
// ============================================================================
// [asmjit::CodeHolder - Attach / Detach]
// ============================================================================
// CodeHolder - Attach / Detach
// ============================
Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
// Catch a possible misuse of the API.
@@ -214,8 +190,8 @@ Error CodeHolder::attach(BaseEmitter* emitter) noexcept {
return DebugUtils::errored(kErrorInvalidArgument);
// Invalid emitter, this should not be possible.
uint32_t type = emitter->emitterType();
if (ASMJIT_UNLIKELY(type == BaseEmitter::kTypeNone || type >= BaseEmitter::kTypeCount))
EmitterType type = emitter->emitterType();
if (ASMJIT_UNLIKELY(type == EmitterType::kNone || uint32_t(type) > uint32_t(EmitterType::kMaxValue)))
return DebugUtils::errored(kErrorInvalidState);
// This is suspicious, but don't fail if `emitter` is already attached
@@ -261,9 +237,8 @@ Error CodeHolder::detach(BaseEmitter* emitter) noexcept {
return err;
}
// ============================================================================
// [asmjit::CodeHolder - Logging]
// ============================================================================
// CodeHolder - Logging
// ====================
void CodeHolder::setLogger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
@@ -274,18 +249,16 @@ void CodeHolder::setLogger(Logger* logger) noexcept {
#endif
}
// ============================================================================
// [asmjit::CodeHolder - Error Handling]
// ============================================================================
// CodeHolder - Error Handling
// ===========================
void CodeHolder::setErrorHandler(ErrorHandler* errorHandler) noexcept {
_errorHandler = errorHandler;
CodeHolder_onSettingsUpdated(this);
}
// ============================================================================
// [asmjit::CodeHolder - Code Buffer]
// ============================================================================
// CodeHolder - Code Buffer
// ========================
static Error CodeHolder_reserveInternal(CodeHolder* self, CodeBuffer* cb, size_t n) noexcept {
uint8_t* oldData = cb->_data;
@@ -368,11 +341,10 @@ Error CodeHolder::reserveBuffer(CodeBuffer* cb, size_t n) noexcept {
return CodeHolder_reserveInternal(this, cb, n);
}
// ============================================================================
// [asmjit::CodeHolder - Sections]
// ============================================================================
// CodeHolder - Sections
// =====================
Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, uint32_t flags, uint32_t alignment, int32_t order) noexcept {
Error CodeHolder::newSection(Section** sectionOut, const char* name, size_t nameSize, SectionFlags flags, uint32_t alignment, int32_t order) noexcept {
*sectionOut = nullptr;
if (nameSize == SIZE_MAX)
@@ -435,7 +407,12 @@ Section* CodeHolder::ensureAddressTableSection() noexcept {
if (_addressTableSection)
return _addressTableSection;
newSection(&_addressTableSection, CodeHolder_addrTabName, sizeof(CodeHolder_addrTabName) - 1, 0, _environment.registerSize(), std::numeric_limits<int32_t>::max());
newSection(&_addressTableSection,
CodeHolder_addrTabName,
sizeof(CodeHolder_addrTabName) - 1,
SectionFlags::kNone,
_environment.registerSize(),
std::numeric_limits<int32_t>::max());
return _addressTableSection;
}
@@ -458,9 +435,8 @@ Error CodeHolder::addAddressToAddressTable(uint64_t address) noexcept {
return kErrorOk;
}
// ============================================================================
// [asmjit::CodeHolder - Labels / Symbols]
// ============================================================================
// CodeHolder - Labels & Symbols
// =============================
//! Only used to lookup a label from `_namedLabels`.
class LabelByName {
@@ -547,32 +523,66 @@ Error CodeHolder::newLabelEntry(LabelEntry** entryOut) noexcept {
return kErrorOk;
}
Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, uint32_t type, uint32_t parentId) noexcept {
Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, size_t nameSize, LabelType type, uint32_t parentId) noexcept {
*entryOut = nullptr;
uint32_t hashCode = CodeHolder_hashNameAndGetSize(name, nameSize);
if (ASMJIT_UNLIKELY(nameSize == 0))
return DebugUtils::errored(kErrorInvalidLabelName);
if (ASMJIT_UNLIKELY(nameSize == 0)) {
if (type == LabelType::kAnonymous)
return newLabelEntry(entryOut);
else
return DebugUtils::errored(kErrorInvalidLabelName);
}
if (ASMJIT_UNLIKELY(nameSize > Globals::kMaxLabelNameSize))
return DebugUtils::errored(kErrorLabelNameTooLong);
switch (type) {
case Label::kTypeLocal:
case LabelType::kAnonymous: {
// Anonymous labels cannot have a parent (or more specifically, parent is useless here).
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId))
return DebugUtils::errored(kErrorInvalidParentLabel);
uint32_t labelId = _labelEntries.size();
if (ASMJIT_UNLIKELY(labelId == Globals::kInvalidId))
return DebugUtils::errored(kErrorTooManyLabels);
ASMJIT_PROPAGATE(_labelEntries.willGrow(&_allocator));
LabelEntry* le = _allocator.allocZeroedT<LabelEntry>();
if (ASMJIT_UNLIKELY(!le))
return DebugUtils::errored(kErrorOutOfMemory);
// NOTE: This LabelEntry has a name, but we leave its hashCode as zero as it's anonymous.
le->_setId(labelId);
le->_parentId = Globals::kInvalidId;
le->_offset = 0;
ASMJIT_PROPAGATE(le->_name.setData(&_zone, name, nameSize));
_labelEntries.appendUnsafe(le);
*entryOut = le;
return kErrorOk;
}
case LabelType::kLocal: {
if (ASMJIT_UNLIKELY(parentId >= _labelEntries.size()))
return DebugUtils::errored(kErrorInvalidParentLabel);
hashCode ^= parentId;
break;
}
case Label::kTypeGlobal:
case Label::kTypeExternal:
case LabelType::kGlobal:
case LabelType::kExternal: {
if (ASMJIT_UNLIKELY(parentId != Globals::kInvalidId))
return DebugUtils::errored(kErrorNonLocalLabelCannotHaveParent);
return DebugUtils::errored(kErrorInvalidParentLabel);
break;
}
default:
default: {
return DebugUtils::errored(kErrorInvalidArgument);
}
}
// Don't allow to insert duplicates. Local labels allow duplicates that have
@@ -596,7 +606,7 @@ Error CodeHolder::newNamedLabelEntry(LabelEntry** entryOut, const char* name, si
le->_hashCode = hashCode;
le->_setId(labelId);
le->_type = uint8_t(type);
le->_type = type;
le->_parentId = parentId;
le->_offset = 0;
ASMJIT_PROPAGATE(le->_name.setData(&_zone, name, nameSize));
@@ -733,11 +743,10 @@ ASMJIT_API Error CodeHolder::bindLabel(const Label& label, uint32_t toSectionId,
return err;
}
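A usage sketch mirroring the rules in the switch above, assuming an initialized CodeHolder `code`; `parentLabelId` is a hypothetical id of a previously created label:
LabelEntry* le;
code.newNamedLabelEntry(&le, "entry", SIZE_MAX, LabelType::kGlobal);   // queryable later via labelIdByName("entry")
code.newNamedLabelEntry(&le, "tmp", SIZE_MAX, LabelType::kAnonymous);  // keeps the name for logging, no lookup entry
code.newNamedLabelEntry(&le, "loop", SIZE_MAX, LabelType::kLocal, parentLabelId); // local labels require a valid parent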
// ============================================================================
// [asmjit::BaseEmitter - Relocations]
// ============================================================================
// CodeHolder - Relocations
// ========================
Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t relocType) noexcept {
Error CodeHolder::newRelocEntry(RelocEntry** dst, RelocType relocType) noexcept {
ASMJIT_PROPAGATE(_relocations.willGrow(&_allocator));
uint32_t relocId = _relocations.size();
@@ -749,7 +758,7 @@ Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t relocType) noexcept {
return DebugUtils::errored(kErrorOutOfMemory);
re->_id = relocId;
re->_relocType = uint8_t(relocType);
re->_relocType = relocType;
re->_sourceSectionId = Globals::kInvalidId;
re->_targetSectionId = Globals::kInvalidId;
_relocations.appendUnsafe(re);
@@ -758,26 +767,25 @@ Error CodeHolder::newRelocEntry(RelocEntry** dst, uint32_t relocType) noexcept {
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseEmitter - Expression Evaluation]
// ============================================================================
// CodeHolder - Expression Evaluation
// ==================================
static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, uint64_t* out) noexcept {
uint64_t value[2];
for (size_t i = 0; i < 2; i++) {
uint64_t v;
switch (exp->valueType[i]) {
case Expression::kValueNone: {
case ExpressionValueType::kNone: {
v = 0;
break;
}
case Expression::kValueConstant: {
case ExpressionValueType::kConstant: {
v = exp->value[i].constant;
break;
}
case Expression::kValueLabel: {
case ExpressionValueType::kLabel: {
LabelEntry* le = exp->value[i].label;
if (!le->isBound())
return DebugUtils::errored(kErrorExpressionLabelNotBound);
@@ -785,7 +793,7 @@ static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, ui
break;
}
case Expression::kValueExpression: {
case ExpressionValueType::kExpression: {
Expression* nested = exp->value[i].expression;
ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(self, nested, &v));
break;
@@ -803,27 +811,27 @@ static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, ui
uint64_t& b = value[1];
switch (exp->opType) {
case Expression::kOpAdd:
case ExpressionOpType::kAdd:
result = a + b;
break;
case Expression::kOpSub:
case ExpressionOpType::kSub:
result = a - b;
break;
case Expression::kOpMul:
case ExpressionOpType::kMul:
result = a * b;
break;
case Expression::kOpSll:
case ExpressionOpType::kSll:
result = (b > 63) ? uint64_t(0) : uint64_t(a << b);
break;
case Expression::kOpSrl:
case ExpressionOpType::kSrl:
result = (b > 63) ? uint64_t(0) : uint64_t(a >> b);
break;
case Expression::kOpSra:
case ExpressionOpType::kSra:
result = Support::sar(a, Support::min<uint64_t>(b, 63));
break;
@@ -835,9 +843,8 @@ static Error CodeHolder_evaluateExpression(CodeHolder* self, Expression* exp, ui
return kErrorOk;
}
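The shift cases guard against counts above 63, which would be undefined behaviour in C++; a two-line illustration of the guard:
#include <cstdint>
uint64_t a = 1, b = 70;
uint64_t r = (b > 63) ? uint64_t(0) : uint64_t(a << b); // r == 0 instead of an undefined oversized shift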
// ============================================================================
// [asmjit::BaseEmitter - Utilities]
// ============================================================================
// CodeHolder - Utilities
// ======================
Error CodeHolder::flatten() noexcept {
uint64_t offset = 0;
@@ -917,7 +924,7 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
// Relocate all recorded locations.
for (const RelocEntry* re : _relocations) {
// Possibly deleted or optimized-out entry.
if (re->relocType() == RelocEntry::kTypeNone)
if (re->relocType() == RelocType::kNone)
continue;
Section* sourceSection = sectionById(re->sourceSectionId());
@@ -940,17 +947,17 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
size_t valueOffset = size_t(re->sourceOffset()) + re->format().valueOffset();
switch (re->relocType()) {
case RelocEntry::kTypeExpression: {
case RelocType::kExpression: {
Expression* expression = (Expression*)(uintptr_t(value));
ASMJIT_PROPAGATE(CodeHolder_evaluateExpression(this, expression, &value));
break;
}
case RelocEntry::kTypeAbsToAbs: {
case RelocType::kAbsToAbs: {
break;
}
case RelocEntry::kTypeRelToAbs: {
case RelocType::kRelToAbs: {
// Value is currently a relative offset from the start of its section.
// We have to convert it to an absolute offset (including base address).
if (ASMJIT_UNLIKELY(!targetSection))
@@ -961,14 +968,14 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
break;
}
case RelocEntry::kTypeAbsToRel: {
case RelocType::kAbsToRel: {
value -= baseAddress + sectionOffset + sourceOffset + regionSize;
if (addressSize > 4 && !Support::isInt32(int64_t(value)))
return DebugUtils::errored(kErrorRelocOffsetOutOfRange);
break;
}
case RelocEntry::kTypeX64AddressEntry: {
case RelocType::kX64AddressEntry: {
if (re->format().valueSize() != 4 || valueOffset < 2)
return DebugUtils::errored(kErrorInvalidRelocEntry);
@@ -1055,7 +1062,7 @@ Error CodeHolder::relocateToBase(uint64_t baseAddress) noexcept {
return kErrorOk;
}
Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, uint32_t copyOptions) noexcept {
Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId, CopySectionFlags copyFlags) noexcept {
if (ASMJIT_UNLIKELY(!isSectionValid(sectionId)))
return DebugUtils::errored(kErrorInvalidSection);
@@ -1067,7 +1074,7 @@ Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId,
memcpy(dst, section->data(), bufferSize);
if (bufferSize < dstSize && (copyOptions & kCopyPadSectionBuffer)) {
if (bufferSize < dstSize && Support::test(copyFlags, CopySectionFlags::kPadSectionBuffer)) {
size_t paddingSize = dstSize - bufferSize;
memset(static_cast<uint8_t*>(dst) + bufferSize, 0, paddingSize);
}
@@ -1075,7 +1082,7 @@ Error CodeHolder::copySectionData(void* dst, size_t dstSize, uint32_t sectionId,
return kErrorOk;
}
Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, uint32_t copyOptions) noexcept {
Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, CopySectionFlags copyFlags) noexcept {
size_t end = 0;
for (Section* section : _sectionsByOrder) {
if (section->offset() > dstSize)
@@ -1091,7 +1098,7 @@ Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, uint32_t copyOpti
size_t paddingSize = 0;
memcpy(dstTarget, section->data(), bufferSize);
if ((copyOptions & kCopyPadSectionBuffer) && bufferSize < section->virtualSize()) {
if (Support::test(copyFlags, CopySectionFlags::kPadSectionBuffer) && bufferSize < section->virtualSize()) {
paddingSize = Support::min<size_t>(dstSize - offset, size_t(section->virtualSize())) - bufferSize;
memset(dstTarget + bufferSize, 0, paddingSize);
}
@@ -1099,16 +1106,15 @@ Error CodeHolder::copyFlattenedData(void* dst, size_t dstSize, uint32_t copyOpti
end = Support::max(end, offset + bufferSize + paddingSize);
}
if (end < dstSize && (copyOptions & kCopyPadTargetBuffer)) {
if (end < dstSize && Support::test(copyFlags, CopySectionFlags::kPadTargetBuffer)) {
memset(static_cast<uint8_t*>(dst) + end, 0, dstSize - end);
}
return kErrorOk;
}
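A usage sketch of the typed copy flags, assuming an initialized CodeHolder `code` that has already been flattened:
#include <vector>
std::vector<uint8_t> out(code.codeSize());
code.copyFlattenedData(out.data(), out.size(),
                       CopySectionFlags::kPadSectionBuffer | CopySectionFlags::kPadTargetBuffer);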
// ============================================================================
// [asmjit::CodeHolder - Unit]
// ============================================================================
// CodeHolder - Tests
// ==================
#if defined(ASMJIT_TEST)
UNIT(code_holder) {
@@ -1116,34 +1122,33 @@ UNIT(code_holder) {
INFO("Verifying CodeHolder::init()");
Environment env;
env.init(Environment::kArchX86);
env.init(Arch::kX86);
code.init(env);
EXPECT(code.arch() == Environment::kArchX86);
EXPECT(code.arch() == Arch::kX86);
INFO("Verifying named labels");
LabelEntry* le;
EXPECT(code.newNamedLabelEntry(&le, "NamedLabel", SIZE_MAX, Label::kTypeGlobal) == kErrorOk);
EXPECT(code.newNamedLabelEntry(&le, "NamedLabel", SIZE_MAX, LabelType::kGlobal) == kErrorOk);
EXPECT(strcmp(le->name(), "NamedLabel") == 0);
EXPECT(code.labelIdByName("NamedLabel") == le->id());
INFO("Verifying section ordering");
Section* section1;
EXPECT(code.newSection(&section1, "high-priority", SIZE_MAX, 0, 1, -1) == kErrorOk);
EXPECT(code.newSection(&section1, "high-priority", SIZE_MAX, SectionFlags::kNone, 1, -1) == kErrorOk);
EXPECT(code.sections()[1] == section1);
EXPECT(code.sectionsByOrder()[0] == section1);
Section* section0;
EXPECT(code.newSection(&section0, "higher-priority", SIZE_MAX, 0, 1, -2) == kErrorOk);
EXPECT(code.newSection(&section0, "higher-priority", SIZE_MAX, SectionFlags::kNone, 1, -2) == kErrorOk);
EXPECT(code.sections()[2] == section0);
EXPECT(code.sectionsByOrder()[0] == section0);
EXPECT(code.sectionsByOrder()[1] == section1);
Section* section3;
EXPECT(code.newSection(&section3, "low-priority", SIZE_MAX, 0, 1, 2) == kErrorOk);
EXPECT(code.newSection(&section3, "low-priority", SIZE_MAX, SectionFlags::kNone, 1, 2) == kErrorOk);
EXPECT(code.sections()[3] == section3);
EXPECT(code.sectionsByOrder()[3] == section3);
}
#endif

File diff suppressed because it is too large

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/codeholder.h"
@@ -50,13 +32,13 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
return false;
switch (format.type()) {
case OffsetFormat::kTypeCommon: {
case OffsetType::kCommon: {
*dst = (uint32_t(offset32) & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
return true;
}
case OffsetFormat::kTypeAArch64_ADR:
case OffsetFormat::kTypeAArch64_ADRP: {
case OffsetType::kAArch64_ADR:
case OffsetType::kAArch64_ADRP: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5)
return false;
@@ -91,7 +73,7 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
return false;
switch (format.type()) {
case OffsetFormat::kTypeCommon: {
case OffsetType::kCommon: {
*dst = (uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
return true;
}

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
#define ASMJIT_CORE_CODEBUFFERWRITER_P_H_INCLUDED
@@ -34,25 +16,17 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_assembler
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
struct OffsetFormat;
// ============================================================================
// [asmjit::CodeWriter]
// ============================================================================
//! Helper that is used to write into a \ref CodeBuffer held by \ref BaseAssembler.
class CodeWriter {
public:
uint8_t* _cursor;
ASMJIT_INLINE explicit CodeWriter(BaseAssembler* a) noexcept
ASMJIT_FORCE_INLINE explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {}
ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
ASMJIT_FORCE_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) {
CodeBuffer& buffer = a->_section->_buffer;
@@ -64,24 +38,24 @@ public:
return kErrorOk;
}
ASMJIT_INLINE uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE void advance(size_t n) noexcept { _cursor += n; }
ASMJIT_FORCE_INLINE uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_FORCE_INLINE void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_FORCE_INLINE void advance(size_t n) noexcept { _cursor += n; }
ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_FORCE_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from);
}
template<typename T>
ASMJIT_INLINE void emit8(T val) noexcept {
ASMJIT_FORCE_INLINE void emit8(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
_cursor[0] = uint8_t(U(val) & U(0xFF));
_cursor++;
}
template<typename T, typename Y>
ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
ASMJIT_FORCE_INLINE void emit8If(T val, Y cond) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size_t(cond) <= 1u);
@@ -90,41 +64,41 @@ public:
}
template<typename T>
ASMJIT_INLINE void emit16uLE(T val) noexcept {
ASMJIT_FORCE_INLINE void emit16uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uLE(_cursor, uint32_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit16uBE(T val) noexcept {
ASMJIT_FORCE_INLINE void emit16uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU16uBE(_cursor, uint32_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit32uLE(T val) noexcept {
ASMJIT_FORCE_INLINE void emit32uLE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uLE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
template<typename T>
ASMJIT_INLINE void emit32uBE(T val) noexcept {
ASMJIT_FORCE_INLINE void emit32uBE(T val) noexcept {
typedef typename std::make_unsigned<T>::type U;
Support::writeU32uBE(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_FORCE_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size);
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
ASMJIT_FORCE_INLINE void emitValueLE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
@@ -137,7 +111,7 @@ public:
}
template<typename T>
ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
ASMJIT_FORCE_INLINE void emitValueBE(const T& value, size_t size) noexcept {
typedef typename std::make_unsigned<T>::type U;
ASMJIT_ASSERT(size <= sizeof(T));
@@ -149,13 +123,13 @@ public:
_cursor += size;
}
ASMJIT_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_FORCE_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size);
_cursor += size;
}
ASMJIT_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_FORCE_INLINE void remove8(uint8_t* where) noexcept {
ASMJIT_ASSERT(where < _cursor);
uint8_t* p = where;
@@ -165,7 +139,7 @@ public:
}
template<typename T>
ASMJIT_INLINE void insert8(uint8_t* where, T val) noexcept {
ASMJIT_FORCE_INLINE void insert8(uint8_t* where, T val) noexcept {
uint8_t* p = _cursor;
while (p != where) {
@@ -177,7 +151,7 @@ public:
_cursor++;
}
ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
ASMJIT_FORCE_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity());
@@ -187,10 +161,7 @@ public:
}
};
// ============================================================================
// [asmjit::CodeWriterUtils]
// ============================================================================
//! Code writer utilities.
namespace CodeWriterUtils {
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
@@ -35,11 +17,11 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::GlobalConstPoolPass]
// ============================================================================
// GlobalConstPoolPass
// ===================
class GlobalConstPoolPass : public Pass {
public:
typedef Pass Base;
public:
ASMJIT_NONCOPYABLE(GlobalConstPoolPass)
@@ -51,53 +33,50 @@ public:
// Flush the global constant pool.
BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
if (compiler->_globalConstPool) {
compiler->addAfter(compiler->_globalConstPool, compiler->lastNode());
compiler->_globalConstPool = nullptr;
ConstPoolNode* globalConstPool = compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)];
if (globalConstPool) {
compiler->addAfter(globalConstPool, compiler->lastNode());
compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
}
return kErrorOk;
}
};
// ============================================================================
// [asmjit::BaseCompiler - Construction / Destruction]
// ============================================================================
// BaseCompiler - Construction & Destruction
// =========================================
BaseCompiler::BaseCompiler() noexcept
: BaseBuilder(),
_func(nullptr),
_vRegZone(4096 - Zone::kBlockOverhead),
_vRegArray(),
_localConstPool(nullptr),
_globalConstPool(nullptr) {
_emitterType = uint8_t(kTypeCompiler);
_validationFlags = uint8_t(InstAPI::kValidationFlagVirtRegs);
_constPools { nullptr, nullptr } {
_emitterType = EmitterType::kCompiler;
_validationFlags = ValidationFlags::kEnableVirtRegs;
}
BaseCompiler::~BaseCompiler() noexcept {}
// ============================================================================
// [asmjit::BaseCompiler - Function Management]
// ============================================================================
// BaseCompiler - Function Management
// ==================================
Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature) {
Error BaseCompiler::newFuncNode(FuncNode** out, const FuncSignature& signature) {
*out = nullptr;
// Create FuncNode together with all the required surrounding nodes.
FuncNode* funcNode;
ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
ASMJIT_PROPAGATE(_newLabelNode(&funcNode->_exitNode));
ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelNode::kSentinelFuncEnd));
ASMJIT_PROPAGATE(newLabelNode(&funcNode->_exitNode));
ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelType::kFuncEnd));
// Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err))
return reportError(err);
// If the Target guarantees greater stack alignment than required by the
// calling convention then override it as we can prevent having to perform
// dynamic stack alignment
// If the Target guarantees greater stack alignment than required by the calling convention
// then override it as we can prevent having to perform dynamic stack alignment
uint32_t environmentStackAlignment = _environment.stackAlignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment)
@@ -123,13 +102,13 @@ Error BaseCompiler::_newFuncNode(FuncNode** out, const FuncSignature& signature)
return kErrorOk;
}
Error BaseCompiler::_addFuncNode(FuncNode** out, const FuncSignature& signature) {
ASMJIT_PROPAGATE(_newFuncNode(out, signature));
Error BaseCompiler::addFuncNode(FuncNode** out, const FuncSignature& signature) {
ASMJIT_PROPAGATE(newFuncNode(out, signature));
addFunc(*out);
return kErrorOk;
}
Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
Error BaseCompiler::newFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
FuncRetNode* node;
@@ -143,8 +122,8 @@ Error BaseCompiler::_newRetNode(FuncRetNode** out, const Operand_& o0, const Ope
return kErrorOk;
}
Error BaseCompiler::_addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
ASMJIT_PROPAGATE(_newRetNode(out, o0, o1));
Error BaseCompiler::addFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
ASMJIT_PROPAGATE(newFuncRetNode(out, o0, o1));
addNode(*out);
return kErrorOk;
}
@@ -169,10 +148,11 @@ Error BaseCompiler::endFunc() {
return reportError(DebugUtils::errored(kErrorInvalidState));
// Add the local constant pool at the end of the function (if exists).
if (_localConstPool) {
ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)];
if (localConstPool) {
setCursor(func->endNode()->prev());
addNode(_localConstPool);
_localConstPool = nullptr;
addNode(localConstPool);
_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
}
// Mark as finished.
@@ -184,28 +164,12 @@ Error BaseCompiler::endFunc() {
return kErrorOk;
}
Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& r) {
FuncNode* func = _func;
// BaseCompiler - Function Invocation
// ==================================
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
if (ASMJIT_UNLIKELY(!isVirtRegValid(r)))
return reportError(DebugUtils::errored(kErrorInvalidVirtId));
VirtReg* vReg = virtRegByReg(r);
func->setArg(argIndex, valueIndex, vReg);
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseCompiler - Function Invocation]
// ============================================================================
Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
Error BaseCompiler::newInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, 0u));
ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, InstOptions::kNone));
node->setOpCount(1);
node->setOp(0, o0);
@@ -228,15 +192,14 @@ Error BaseCompiler::_newInvokeNode(InvokeNode** out, uint32_t instId, const Oper
return kErrorOk;
}
Error BaseCompiler::_addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
ASMJIT_PROPAGATE(_newInvokeNode(out, instId, o0, signature));
Error BaseCompiler::addInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
ASMJIT_PROPAGATE(newInvokeNode(out, instId, o0, signature));
addNode(*out);
return kErrorOk;
}
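A hedged sketch of building a function with the renamed node factories, assuming an x86::Compiler `cc` attached to a CodeHolder and a prepared FuncSignature `sig`; `newInt32` and `FuncNode::setArg` are assumed to be the user-facing counterparts of the internals above:
FuncNode* funcNode;
cc.addFuncNode(&funcNode, sig);      // allocates FuncNode + exit label + end sentinel and appends it
x86::Gp arg0 = cc.newInt32("arg0");
funcNode->setArg(0, arg0);           // arguments are attached to the FuncNode itself
cc.endFunc();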
// ============================================================================
// [asmjit::BaseCompiler - Virtual Registers]
// ============================================================================
// BaseCompiler - Virtual Registers
// ================================
static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
uint32_t index = unsigned(Operand::virtIdToIndex(vReg->_id));
@@ -248,7 +211,7 @@ static void BaseCompiler_assignGenericName(BaseCompiler* self, VirtReg* vReg) {
vReg->_name.setData(&self->_dataZone, buf, unsigned(size));
}
Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name) {
Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature signature, const char* name) {
*out = nullptr;
uint32_t index = _vRegArray.size();
@@ -262,10 +225,10 @@ Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signatur
if (ASMJIT_UNLIKELY(!vReg))
return reportError(DebugUtils::errored(kErrorOutOfMemory));
uint32_t size = Type::sizeOf(typeId);
uint32_t size = TypeUtils::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64);
vReg = new(vReg) VirtReg(Operand::indexToVirtId(index), signature, size, alignment, typeId);
vReg = new(vReg) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId);
#ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0')
@@ -282,22 +245,22 @@ Error BaseCompiler::newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signatur
return kErrorOk;
}
Error BaseCompiler::_newReg(BaseReg* out, uint32_t typeId, const char* name) {
RegInfo regInfo;
Error BaseCompiler::_newReg(BaseReg* out, TypeId typeId, const char* name) {
OperandSignature regSignature;
out->reset();
Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
out->_initReg(regInfo.signature(), vReg->id());
out->_initReg(regSignature, vReg->id());
return kErrorOk;
}
Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...) {
Error BaseCompiler::_newRegFmt(BaseReg* out, TypeId typeId, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
@@ -311,75 +274,72 @@ Error BaseCompiler::_newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, .
Error BaseCompiler::_newReg(BaseReg* out, const BaseReg& ref, const char* name) {
out->reset();
RegInfo regInfo;
uint32_t typeId;
OperandSignature regSignature;
TypeId typeId;
if (isVirtRegValid(ref)) {
VirtReg* vRef = virtRegByReg(ref);
typeId = vRef->typeId();
// NOTE: It's possible to cast one register type to another if it's the
// same register group. However, VirtReg always contains the TypeId that
// was used to create the register. This means that in some cases we may
// end up having different size of `ref` and `vRef`. In such case we
// adjust the TypeId to match the `ref` register type instead of the
// original register type, which should be the expected behavior.
uint32_t typeSize = Type::sizeOf(typeId);
// NOTE: It's possible to cast one register type to another if it's the same register group. However, VirtReg
// always contains the TypeId that was used to create the register. This means that in some cases we may end
// up having different size of `ref` and `vRef`. In such case we adjust the TypeId to match the `ref` register
// type instead of the original register type, which should be the expected behavior.
uint32_t typeSize = TypeUtils::sizeOf(typeId);
uint32_t refSize = ref.size();
if (typeSize != refSize) {
if (Type::isInt(typeId)) {
if (TypeUtils::isInt(typeId)) {
// GP register - change TypeId to match `ref`, but keep sign of `vRef`.
switch (refSize) {
case 1: typeId = Type::kIdI8 | (typeId & 1); break;
case 2: typeId = Type::kIdI16 | (typeId & 1); break;
case 4: typeId = Type::kIdI32 | (typeId & 1); break;
case 8: typeId = Type::kIdI64 | (typeId & 1); break;
default: typeId = Type::kIdVoid; break;
case 1: typeId = TypeId(uint32_t(TypeId::kInt8 ) | (uint32_t(typeId) & 1)); break;
case 2: typeId = TypeId(uint32_t(TypeId::kInt16) | (uint32_t(typeId) & 1)); break;
case 4: typeId = TypeId(uint32_t(TypeId::kInt32) | (uint32_t(typeId) & 1)); break;
case 8: typeId = TypeId(uint32_t(TypeId::kInt64) | (uint32_t(typeId) & 1)); break;
default: typeId = TypeId::kVoid; break;
}
}
else if (Type::isMmx(typeId)) {
else if (TypeUtils::isMmx(typeId)) {
// MMX register - always use 64-bit.
typeId = Type::kIdMmx64;
typeId = TypeId::kMmx64;
}
else if (Type::isMask(typeId)) {
else if (TypeUtils::isMask(typeId)) {
// Mask register - change TypeId to match `ref` size.
switch (refSize) {
case 1: typeId = Type::kIdMask8; break;
case 2: typeId = Type::kIdMask16; break;
case 4: typeId = Type::kIdMask32; break;
case 8: typeId = Type::kIdMask64; break;
default: typeId = Type::kIdVoid; break;
case 1: typeId = TypeId::kMask8; break;
case 2: typeId = TypeId::kMask16; break;
case 4: typeId = TypeId::kMask32; break;
case 8: typeId = TypeId::kMask64; break;
default: typeId = TypeId::kVoid; break;
}
}
else {
// VEC register - change TypeId to match `ref` size, keep vector metadata.
uint32_t elementTypeId = Type::baseOf(typeId);
// Vector register - change TypeId to match `ref` size, keep vector metadata.
TypeId scalarTypeId = TypeUtils::scalarOf(typeId);
switch (refSize) {
case 16: typeId = Type::_kIdVec128Start + (elementTypeId - Type::kIdI8); break;
case 32: typeId = Type::_kIdVec256Start + (elementTypeId - Type::kIdI8); break;
case 64: typeId = Type::_kIdVec512Start + (elementTypeId - Type::kIdI8); break;
default: typeId = Type::kIdVoid; break;
case 16: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec128Start); break;
case 32: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec256Start); break;
case 64: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec512Start); break;
default: typeId = TypeId::kVoid; break;
}
}
if (typeId == Type::kIdVoid)
if (typeId == TypeId::kVoid)
return reportError(DebugUtils::errored(kErrorInvalidState));
}
}
else {
typeId = ref.type();
typeId = ArchTraits::byArch(arch()).regTypeToTypeId(ref.type());
}
Error err = ArchUtils::typeIdToRegInfo(arch(), typeId, &typeId, &regInfo);
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regInfo.signature(), name));
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
out->_initReg(regInfo.signature(), vReg->id());
out->_initReg(regSignature, vReg->id());
return kErrorOk;
}
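The TypeId adjustment above is what makes cloning a register by reference behave intuitively. A hedged usage sketch, assuming an x86::Compiler `cc`; `newSimilarReg` is assumed to be the public entry point that reaches this `_newReg` overload:
x86::Gp counter = cc.newUInt32("counter");
x86::Gp copy    = cc.newSimilarReg(counter, "copy"); // inherits group, size, and TypeId from `counter`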
@@ -410,14 +370,17 @@ Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, c
alignment = 64;
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, 0, 0, name));
ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature(0), name));
vReg->_virtSize = size;
vReg->_isStack = true;
vReg->_alignment = uint8_t(alignment);
// Set the memory operand to GPD/GPQ and its id to VirtReg.
*out = BaseMem(BaseMem::Decomposed { _gpRegInfo.type(), vReg->id(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(_gpSignature.regType()) |
OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag),
vReg->id(), 0, 0);
return kErrorOk;
}
@@ -438,9 +401,8 @@ Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t new
if (newAlignment)
vReg->_alignment = uint8_t(newAlignment);
// This is required if the RAPass is already running. There is a chance that
// a stack-slot has been already allocated and in that case it has to be
// updated as well, otherwise we would allocate wrong amount of memory.
// This is required if the RAPass is already running. There is a chance that a stack-slot has been already
// allocated and in that case it has to be updated as well, otherwise we would allocate wrong amount of memory.
RAWorkReg* workReg = vReg->_workReg;
if (workReg && workReg->_stackSlot) {
workReg->_stackSlot->_size = vReg->_virtSize;
@@ -450,37 +412,26 @@ Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t new
return kErrorOk;
}
Error BaseCompiler::_newConst(BaseMem* out, uint32_t scope, const void* data, size_t size) {
Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) {
out->reset();
ConstPoolNode** pPool;
if (scope == ConstPool::kScopeLocal)
pPool = &_localConstPool;
else if (scope == ConstPool::kScopeGlobal)
pPool = &_globalConstPool;
else
if (uint32_t(scope) > 1)
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (!*pPool)
ASMJIT_PROPAGATE(_newConstPoolNode(pPool));
if (!_constPools[uint32_t(scope)])
ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)]));
ConstPoolNode* pool = *pPool;
ConstPoolNode* pool = _constPools[uint32_t(scope)];
size_t off;
Error err = pool->add(data, size, off);
if (ASMJIT_UNLIKELY(err))
return reportError(err);
*out = BaseMem(BaseMem::Decomposed {
Label::kLabelTag, // Base type.
pool->labelId(), // Base id.
0, // Index type.
0, // Index id.
int32_t(off), // Offset.
uint32_t(size), // Size.
0 // Flags.
});
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(RegType::kLabelTag) |
OperandSignature::fromSize(uint32_t(size)),
pool->labelId(), 0, int32_t(off));
return kErrorOk;
}
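A hedged sketch of requesting constants with the typed scope, assuming an x86::Compiler `cc`; `newDoubleConst` is assumed to be the usual arch-level wrapper over `_newConst`:
x86::Mem pi   = cc.newDoubleConst(ConstPoolScope::kLocal, 3.141592653589793); // flushed at the end of the current function
x86::Mem zero = cc.newDoubleConst(ConstPoolScope::kGlobal, 0.0);              // flushed once by GlobalConstPoolPass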
@@ -505,11 +456,10 @@ void BaseCompiler::rename(const BaseReg& reg, const char* fmt, ...) {
}
}
// ============================================================================
// [asmjit::BaseCompiler - Jump Annotations]
// ============================================================================
// BaseCompiler - Jump Annotations
// ===============================
Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation) {
Error BaseCompiler::newJumpNode(JumpNode** out, InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation) {
JumpNode* node = _allocator.allocT<JumpNode>();
uint32_t opCount = 1;
@@ -524,8 +474,8 @@ Error BaseCompiler::newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOp
return kErrorOk;
}
Error BaseCompiler::emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation) {
uint32_t options = instOptions() | forcedInstOptions();
Error BaseCompiler::emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation) {
InstOptions options = instOptions() | forcedInstOptions();
RegOnly extra = extraReg();
const char* comment = inlineComment();
@@ -562,16 +512,15 @@ JumpAnnotation* BaseCompiler::newJumpAnnotation() {
return jumpAnnotation;
}
// ============================================================================
// [asmjit::BaseCompiler - Events]
// ============================================================================
// BaseCompiler - Events
// =====================
Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_gpRegInfo.setSignature(archTraits.regTypeToSignature(nativeRegType));
RegType nativeRegType = Environment::is32Bit(code->arch()) ? RegType::kGp32 : RegType::kGp64;
_gpSignature = archTraits.regTypeToSignature(nativeRegType);
Error err = addPassT<GlobalConstPoolPass>();
if (ASMJIT_UNLIKELY(err)) {
@@ -584,8 +533,8 @@ Error BaseCompiler::onAttach(CodeHolder* code) noexcept {
Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
_func = nullptr;
_localConstPool = nullptr;
_globalConstPool = nullptr;
_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
_vRegArray.reset();
_vRegZone.reset();
@@ -593,32 +542,30 @@ Error BaseCompiler::onDetach(CodeHolder* code) noexcept {
return Base::onDetach(code);
}
// ============================================================================
// [asmjit::FuncPass - Construction / Destruction]
// ============================================================================
// FuncPass - Construction & Destruction
// =====================================
FuncPass::FuncPass(const char* name) noexcept
: Pass(name) {}
// ============================================================================
// [asmjit::FuncPass - Run]
// ============================================================================
// FuncPass - Run
// ==============
Error FuncPass::run(Zone* zone, Logger* logger) {
BaseNode* node = cb()->firstNode();
if (!node) return kErrorOk;
do {
if (node->type() == BaseNode::kNodeFunc) {
if (node->type() == NodeType::kFunc) {
FuncNode* func = node->as<FuncNode>();
node = func->endNode();
ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
}
// Find a function by skipping all nodes that are not `kNodeFunc`.
// Find a function by skipping all nodes that are not `NodeType::kFunc`.
do {
node = node->next();
} while (node && node->type() != BaseNode::kNodeFunc);
} while (node && node->type() != NodeType::kFunc);
} while (node);
return kErrorOk;

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_COMPILER_H_INCLUDED
#define ASMJIT_CORE_COMPILER_H_INCLUDED
@@ -40,10 +22,6 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [Forward Declarations]
// ============================================================================
class JumpAnnotation;
class JumpNode;
class FuncNode;
@@ -53,25 +31,18 @@ class InvokeNode;
//! \addtogroup asmjit_compiler
//! \{
// ============================================================================
// [asmjit::BaseCompiler]
// ============================================================================
//! Code emitter that uses virtual registers and performs register allocation.
//!
//! Compiler is a high-level code-generation tool that provides register
//! allocation and automatic handling of function calling conventions. It was
//! primarily designed for merging multiple parts of code into a function
//! without worrying about registers and function calling conventions.
//! Compiler is a high-level code-generation tool that provides register allocation and automatic handling of function
//! calling conventions. It was primarily designed for merging multiple parts of code into a function without worrying
//! about registers and function calling conventions.
//!
//! BaseCompiler can be used, with a minimum effort, to handle 32-bit and
//! 64-bit code generation within a single code base.
//! BaseCompiler can be used, with minimal effort, to handle 32-bit and 64-bit code generation within a single code
//! base.
//!
//! BaseCompiler is based on BaseBuilder and contains all the features it
//! provides. It means that the code it stores can be modified (removed, added,
//! injected) and analyzed. When the code is finalized the compiler can emit
//! the code into an Assembler to translate the abstract representation into a
//! machine code.
//! BaseCompiler is based on BaseBuilder and contains all the features it provides. This means that the code it stores
//! can be modified (removed, added, injected) and analyzed. When the code is finalized, the compiler can emit the code
//! into an Assembler to translate the abstract representation into machine code.
//!
//! Check out architecture specific compilers for more details and examples:
//!
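For orientation, here is a minimal end-to-end sketch (not part of this diff) of the BaseCompiler workflow through the x86-specific front-end; it assumes the public JitRuntime / CodeHolder / x86::Compiler API and a host that can execute the generated code.

#include <asmjit/x86.h>
using namespace asmjit;

int main() {
  JitRuntime rt;                       // Owns and executes the generated code.
  CodeHolder code;
  code.init(rt.environment());         // Match the host environment.

  x86::Compiler cc(&code);             // BaseCompiler-based emitter.
  cc.addFunc(FuncSignatureT<int>());   // int fn(void)

  x86::Gp result = cc.newInt32("result");  // Virtual register, assigned by the register allocator.
  cc.mov(result, 42);
  cc.ret(result);
  cc.endFunc();
  cc.finalize();                       // Runs register allocation and serializes to machine code.

  int (*fn)(void);
  rt.add(&fn, &code);                  // Relocate the code and make it executable.
  return fn() == 42 ? 0 : 1;
}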
@@ -81,6 +52,9 @@ public:
ASMJIT_NONCOPYABLE(BaseCompiler)
typedef BaseBuilder Base;
//! \name Members
//! \{
//! Current function.
FuncNode* _func;
//! Allocates `VirtReg` objects.
@@ -90,10 +64,12 @@ public:
//! Stores jump annotations.
ZoneVector<JumpAnnotation*> _jumpAnnotations;
//! Local constant pool, flushed at the end of each function.
ConstPoolNode* _localConstPool;
//! Global constant pool, flushed by `finalize()`.
ConstPoolNode* _globalConstPool;
//! Local and global constant pools.
//!
//! Local constant pool is flushed with each function, global constant pool is flushed only by \ref finalize().
ConstPoolNode* _constPools[2];
//! \}
//! \name Construction & Destruction
//! \{
@@ -108,31 +84,31 @@ public:
//! \name Function Management
//! \{
//! Returns the current function.
inline FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode.
ASMJIT_API Error _newFuncNode(FuncNode** out, const FuncSignature& signature);
//! Creates a new \ref FuncNode and adds it to the compiler.
ASMJIT_API Error _addFuncNode(FuncNode** out, const FuncSignature& signature);
ASMJIT_API Error newFuncNode(FuncNode** out, const FuncSignature& signature);
//! Creates a new \ref FuncNode and adds it to the instruction stream.
ASMJIT_API Error addFuncNode(FuncNode** out, const FuncSignature& signature);
//! Creates a new \ref FuncRetNode.
ASMJIT_API Error _newRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncRetNode and adds it to the compiler.
ASMJIT_API Error _addRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
ASMJIT_API Error newFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncRetNode and adds it to the instruction stream.
ASMJIT_API Error addFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1);
//! Returns the current function.
inline FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode with the given `signature` and returns it.
inline FuncNode* newFunc(const FuncSignature& signature) {
FuncNode* node;
_newFuncNode(&node, signature);
newFuncNode(&node, signature);
return node;
}
//! Creates a new \ref FuncNode with the given `signature`, adds it to the
//! compiler by using the \ref addFunc(FuncNode*) overload, and returns it.
//! Creates a new \ref FuncNode with the given `signature`, adds it to the instruction stream by using
//! the \ref addFunc(FuncNode*) overload, and returns it.
inline FuncNode* addFunc(const FuncSignature& signature) {
FuncNode* node;
_addFuncNode(&node, signature);
addFuncNode(&node, signature);
return node;
}
@@ -141,23 +117,21 @@ public:
//! Emits a sentinel that marks the end of the current function.
ASMJIT_API Error endFunc();
ASMJIT_API Error _setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg);
#if !defined(ASMJIT_NO_DEPRECATED)
inline Error _setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg);
//! Sets a function argument at `argIndex` to `reg`.
ASMJIT_DEPRECATED("Setting arguments through Compiler is deprecated, use FuncNode->setArg() instead")
inline Error setArg(size_t argIndex, const BaseReg& reg) { return _setArg(argIndex, 0, reg); }
//! Sets a function argument at `argIndex` at `valueIndex` to `reg`.
ASMJIT_DEPRECATED("Setting arguments through Compiler is deprecated, use FuncNode->setArg() instead")
inline Error setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) { return _setArg(argIndex, valueIndex, reg); }
#endif
inline FuncRetNode* newRet(const Operand_& o0, const Operand_& o1) {
inline Error addRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
_newRetNode(&node, o0, o1);
return node;
}
inline FuncRetNode* addRet(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
_addRetNode(&node, o0, o1);
return node;
return addFuncRetNode(&node, o0, o1);
}
//! \}
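Since Compiler::setArg() is now deprecated, function arguments are assigned on the FuncNode itself. A short sketch, assuming an x86::Compiler `cc`:

FuncNode* funcNode = cc.addFunc(FuncSignatureT<int, int, int>());
x86::Gp a = cc.newInt32("a");
x86::Gp b = cc.newInt32("b");
funcNode->setArg(0, a);   // Replaces the deprecated cc.setArg(0, a).
funcNode->setArg(1, b);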
@@ -166,23 +140,9 @@ public:
//! \{
//! Creates a new \ref InvokeNode.
ASMJIT_API Error _newInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new \ref InvokeNode and adds it to Compiler.
ASMJIT_API Error _addInvokeNode(InvokeNode** out, uint32_t instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new `InvokeNode`.
inline InvokeNode* newCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
_newInvokeNode(&node, instId, o0, signature);
return node;
}
//! Adds a new `InvokeNode`.
inline InvokeNode* addCall(uint32_t instId, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node;
_addInvokeNode(&node, instId, o0, signature);
return node;
}
ASMJIT_API Error newInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature);
//! Creates a new \ref InvokeNode and adds it to the instruction stream.
ASMJIT_API Error addInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature);
//! \}
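A hedged sketch of how these are typically driven through the x86 front-end (cc.invoke() is the convenience wrapper around the invoke-node creation shown above); `add_fn`, `a`, `b`, and `result` are assumed to exist:

extern "C" int add_fn(int a, int b);

InvokeNode* invokeNode;
cc.invoke(&invokeNode, imm((void*)add_fn), FuncSignatureT<int, int, int>());
invokeNode->setArg(0, a);        // Virtual registers holding the arguments.
invokeNode->setArg(1, b);
invokeNode->setRet(0, result);   // Virtual register receiving the return value.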
@@ -191,18 +151,17 @@ public:
//! Creates a new virtual register representing the given `typeId` and `signature`.
//!
//! \note This function is public, but it's not generally recommended to be used
//! by AsmJit users, use architecture-specific `newReg()` functionality instead
//! or functions like \ref _newReg() and \ref _newRegFmt().
ASMJIT_API Error newVirtReg(VirtReg** out, uint32_t typeId, uint32_t signature, const char* name);
//! \note This function is public, but it's not generally recommended to be used by AsmJit users, use architecture
//! specific `newReg()` functionality instead or functions like \ref _newReg() and \ref _newRegFmt().
ASMJIT_API Error newVirtReg(VirtReg** out, TypeId typeId, OperandSignature signature, const char* name);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
ASMJIT_API Error _newReg(BaseReg* out, uint32_t typeId, const char* name = nullptr);
ASMJIT_API Error _newReg(BaseReg* out, TypeId typeId, const char* name = nullptr);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
//!
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _newRegFmt(BaseReg* out, uint32_t typeId, const char* fmt, ...);
ASMJIT_API Error _newRegFmt(BaseReg* out, TypeId typeId, const char* fmt, ...);
//! Creates a new virtual register compatible with the provided reference register `ref`.
ASMJIT_API Error _newReg(BaseReg* out, const BaseReg& ref, const char* name = nullptr);
@@ -233,9 +192,8 @@ public:
//! Returns \ref VirtReg associated with the given virtual register `index`.
//!
//! \note This is not the same as virtual register id. The conversion between
//! id and its index is implemented by \ref Operand_::virtIdToIndex() and \ref
//! Operand_::indexToVirtId() functions.
//! \note This is not the same as virtual register id. The conversion between id and its index is implemented
//! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions.
inline VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
//! Returns an array of all virtual registers managed by the Compiler.
@@ -262,11 +220,11 @@ public:
//! \name Constants
//! \{
//! Creates a new constant of the given `scope` (see \ref ConstPool::Scope).
//! Creates a new constant of the given `scope` (see \ref ConstPoolScope).
//!
//! This function adds a constant of the given `size` to the built-in \ref
//! ConstPool and stores the reference to that constant to the `out` operand.
ASMJIT_API Error _newConst(BaseMem* out, uint32_t scope, const void* data, size_t size);
//! This function adds a constant of the given `size` to the built-in \ref ConstPool and stores the reference to that
//! constant to the `out` operand.
ASMJIT_API Error _newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size);
//! \}
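A small sketch of the constant-pool API as it is usually reached through the x86 front-end; it assumes helpers such as x86::Compiler::newDoubleConst() and newXmmSd(), which forward to _newConst() and _newReg(). The ConstPoolScope value selects the local or global pool.

x86::Xmm v = cc.newXmmSd("v");
x86::Mem c = cc.newDoubleConst(ConstPoolScope::kLocal, 3.141592653589793);
cc.movsd(v, c);   // Loads the constant; with local scope the pool is emitted right after the function.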
@@ -285,23 +243,15 @@ public:
return _jumpAnnotations;
}
ASMJIT_API Error newJumpNode(JumpNode** out, uint32_t instId, uint32_t instOptions, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emitAnnotatedJump(uint32_t instId, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error newJumpNode(JumpNode** out, InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation);
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate
//! possible targets of a jump where the target is not a label, for example
//! to implement jump tables.
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the
//! target is not a label, for example to implement jump tables.
ASMJIT_API JumpAnnotation* newJumpAnnotation();
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("alloc() has no effect, it will be removed in the future")
inline void alloc(BaseReg&) {}
ASMJIT_DEPRECATED("spill() has no effect, it will be removed in the future")
inline void spill(BaseReg&) {}
#endif // !ASMJIT_NO_DEPRECATED
//! \name Events
//! \{
@@ -311,22 +261,19 @@ public:
//! \}
};
// ============================================================================
// [asmjit::JumpAnnotation]
// ============================================================================
//! Jump annotation used to annotate jumps.
//!
//! \ref BaseCompiler allows to emit jumps where the target is either register
//! or memory operand. Such jumps cannot be trivially inspected, so instead of
//! doing heuristics AsmJit allows to annotate such jumps with possible targets.
//! Register allocator then use the annotation to construct control-flow, which
//! is then used by liveness analysis and other tools to prepare ground for
//! register allocation.
//! \ref BaseCompiler makes it possible to emit jumps where the target is either a register or a memory operand. Such
//! jumps cannot be trivially inspected, so instead of relying on heuristics AsmJit allows annotating such jumps with
//! their possible targets. The register allocator then uses the annotation to construct the control flow, which is
//! then used by liveness analysis and other tools to prepare the ground for register allocation.
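A minimal sketch of annotating an indirect jump, assuming an x86::Compiler `cc`, labels `L0`/`L1` created via cc.newLabel() and bound in the jump table, and a register holding the computed target:

JumpAnnotation* annotation = cc.newJumpAnnotation();
annotation->addLabel(L0);               // Possible target #1.
annotation->addLabel(L1);               // Possible target #2.

x86::Gp target = cc.newIntPtr("target");
// ... load `target` from the jump table ...
cc.jmp(target, annotation);             // RAPass can now build the control-flow graph across this jump.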
class JumpAnnotation {
public:
ASMJIT_NONCOPYABLE(JumpAnnotation)
//! \name Members
//! \{
//! Compiler that owns this JumpAnnotation.
BaseCompiler* _compiler;
//! Annotation identifier.
@@ -334,10 +281,20 @@ public:
//! Vector of label identifiers, see \ref labelIds().
ZoneVector<uint32_t> _labelIds;
//! \}
//! \name Construction & Destruction
//! \{
inline JumpAnnotation(BaseCompiler* compiler, uint32_t annotationId) noexcept
: _compiler(compiler),
_annotationId(annotationId) {}
//! \}
//! \name Accessors
//! \{
//! Returns the compiler that owns this JumpAnnotation.
inline BaseCompiler* compiler() const noexcept { return _compiler; }
//! Returns the annotation id.
@@ -350,35 +307,42 @@ public:
//! Tests whether the given `labelId` is a target of this JumpAnnotation.
inline bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
//! \}
//! \name Annotation Building API
//! \{
//! Adds the `label` to the list of targets of this JumpAnnotation.
inline Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
//! Adds the `labelId` to the list of targets of this JumpAnnotation.
inline Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
};
// ============================================================================
// [asmjit::JumpNode]
// ============================================================================
//! \}
};
//! Jump instruction with \ref JumpAnnotation.
//!
//! \note This node should be only used to represent jump where the jump target
//! cannot be deduced by examining instruction operands. For example if the jump
//! target is register or memory location. This pattern is often used to perform
//! indirect jumps that use jump table, e.g. to implement `switch{}` statement.
//! \note This node should only be used to represent a jump whose target cannot be deduced by examining the
//! instruction operands, for example when the jump target is a register or a memory location. This pattern is often
//! used to perform indirect jumps that use a jump table, e.g. to implement a `switch{}` statement.
class JumpNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(JumpNode)
//! \name Members
//! \{
JumpAnnotation* _annotation;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE JumpNode(BaseCompiler* cc, uint32_t instId, uint32_t options, uint32_t opCount, JumpAnnotation* annotation) noexcept
inline JumpNode(BaseCompiler* cc, InstId instId, InstOptions options, uint32_t opCount, JumpAnnotation* annotation) noexcept
: InstNode(cc, instId, options, opCount, kBaseOpCapacity),
_annotation(annotation) {
setType(kNodeJump);
setType(NodeType::kJump);
}
//! \}
@@ -396,31 +360,24 @@ public:
//! \}
};
// ============================================================================
// [asmjit::FuncNode]
// ============================================================================
//! Function node represents a function used by \ref BaseCompiler.
//!
//! A function is composed of the following:
//!
//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit.
//! To get the entry, simply use \ref FuncNode::label(), which is the same
//! as \ref LabelNode::label().
//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit. To get the entry, simply use
//! \ref FuncNode::label(), which is the same as \ref LabelNode::label().
//!
//! - Function exit, which is represented by \ref FuncNode::exitNode(). A
//! helper function \ref FuncNode::exitLabel() exists and returns an exit
//! label instead of node.
//! - Function exit, which is represented by \ref FuncNode::exitNode(). A helper function
//! \ref FuncNode::exitLabel() exists and returns an exit label instead of node.
//!
//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of
//! a function - there should be no code that belongs to the function after
//! this node, but the Compiler doesn't enforce that at the moment.
//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of a function - there should be no
//! code that belongs to the function after this node, but the Compiler doesn't enforce that at the moment.
//!
//! - Function detail, see \ref FuncNode::detail().
//!
//! - Function frame, see \ref FuncNode::frame().
//!
//! - Function arguments mapped to virtual registers, see \ref FuncNode::args().
//! - Function arguments mapped to virtual registers, see \ref FuncNode::argPacks().
//!
//! In a node list, the function and its body looks like the following:
//!
@@ -439,29 +396,30 @@ public:
//! [...] - Anything after the function.
//! \endcode
//!
//! When a function is added to the compiler by \ref BaseCompiler::addFunc() it
//! actually inserts 3 nodes (FuncNode, ExitLabel, and FuncEnd) and sets the
//! current cursor to be FuncNode. When \ref BaseCompiler::endFunc() is called
//! the cursor is set to FuncEnd. This guarantees that user can use ExitLabel
//! as a marker after additional code or data can be placed, and it's a common
//! practice.
//! When a function is added to the instruction stream by \ref BaseCompiler::addFunc() it actually inserts 3 nodes
//! (FuncNode, ExitLabel, and FuncEnd) and sets the current cursor to FuncNode. When \ref BaseCompiler::endFunc()
//! is called the cursor is set to FuncEnd. This guarantees that the user can use ExitLabel as a marker after which
//! additional code or data can be placed, which is a common practice.
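The parts listed above map to FuncNode accessors; a short reference sketch, assuming `funcNode` was returned by cc.addFunc():

Label entry = funcNode->label();          // Function entry - FuncNode is a LabelNode.
Label exit  = funcNode->exitLabel();      // Function exit, same as exitNode()->label().
SentinelNode* end = funcNode->endNode();  // "End of function" sentinel.
FuncDetail& detail = funcNode->detail();  // Calling convention and argument details.
FuncFrame& frame = funcNode->frame();     // Stack frame configuration.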
class FuncNode : public LabelNode {
public:
ASMJIT_NONCOPYABLE(FuncNode)
//! Arguments pack.
struct ArgPack {
VirtReg* _data[Globals::kMaxValuePack];
RegOnly _data[Globals::kMaxValuePack];
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex] = nullptr;
_data[valueIndex].reset();
}
inline VirtReg*& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
inline VirtReg* const& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
inline RegOnly& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
inline const RegOnly& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
};
//! \name Members
//! \{
//! Function detail.
FuncDetail _funcDetail;
//! Function frame.
@@ -470,24 +428,25 @@ public:
LabelNode* _exitNode;
//! Function end (sentinel).
SentinelNode* _end;
//! Argument packs.
ArgPack* _args;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `FuncNode` instance.
//!
//! Always use `BaseCompiler::addFunc()` to create `FuncNode`.
ASMJIT_INLINE FuncNode(BaseBuilder* cb) noexcept
//! Always use `BaseCompiler::addFunc()` to create a new `FuncNode`.
inline FuncNode(BaseBuilder* cb) noexcept
: LabelNode(cb),
_funcDetail(),
_frame(),
_exitNode(nullptr),
_end(nullptr),
_args(nullptr) {
setType(kNodeFunc);
setType(NodeType::kFunc);
}
//! \}
@@ -500,12 +459,12 @@ public:
//! Returns function exit label.
inline Label exitLabel() const noexcept { return _exitNode->label(); }
//! Returns "End of Func" sentinel.
//! Returns "End of Func" sentinel node.
inline SentinelNode* endNode() const noexcept { return _end; }
//! Returns function declaration.
//! Returns function detail.
inline FuncDetail& detail() noexcept { return _funcDetail; }
//! Returns function declaration.
//! Returns function detail.
inline const FuncDetail& detail() const noexcept { return _funcDetail; }
//! Returns function frame.
@@ -513,14 +472,19 @@ public:
//! Returns function frame.
inline const FuncFrame& frame() const noexcept { return _frame; }
//! Tests whether the function has a return value.
inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns function attributes.
inline FuncAttributes attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
inline void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); }
//! Returns arguments count.
inline uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
//! Returns argument packs.
inline ArgPack* argPacks() const noexcept { return _args; }
//! Tests whether the function has a return value.
inline bool hasRet() const noexcept { return _funcDetail.hasRet(); }
//! Returns argument pack at `argIndex`.
inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
@@ -528,15 +492,27 @@ public:
}
//! Sets argument at `argIndex`.
inline void setArg(size_t argIndex, VirtReg* vReg) noexcept {
inline void setArg(size_t argIndex, const BaseReg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0] = vReg;
_args[argIndex][0].init(vReg);
}
//! \overload
inline void setArg(size_t argIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0].init(vReg);
}
//! Sets argument at `argIndex` and `valueIndex`.
inline void setArg(size_t argIndex, size_t valueIndex, VirtReg* vReg) noexcept {
inline void setArg(size_t argIndex, size_t valueIndex, const BaseReg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = vReg;
_args[argIndex][valueIndex].init(vReg);
}
//! \overload
inline void setArg(size_t argIndex, size_t valueIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].init(vReg);
}
//! Resets argument pack at `argIndex`.
@@ -548,21 +524,12 @@ public:
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = nullptr;
_args[argIndex][valueIndex].reset();
}
//! Returns function attributes.
inline uint32_t attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
inline void addAttributes(uint32_t attrs) noexcept { _frame.addAttributes(attrs); }
//! \}
};
// ============================================================================
// [asmjit::FuncRetNode]
// ============================================================================
//! Function return, used by \ref BaseCompiler.
class FuncRetNode : public InstNode {
public:
@@ -572,27 +539,21 @@ public:
//! \{
//! Creates a new `FuncRetNode` instance.
inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, 0, 0) {
_any._nodeType = kNodeFuncRet;
inline FuncRetNode(BaseBuilder* cb) noexcept : InstNode(cb, BaseInst::kIdAbstract, InstOptions::kNone, 0) {
_any._nodeType = NodeType::kFuncRet;
}
//! \}
};
// ============================================================================
// [asmjit::InvokeNode]
// ============================================================================
//! Function invocation, used by \ref BaseCompiler.
class InvokeNode : public InstNode {
public:
ASMJIT_NONCOPYABLE(InvokeNode)
//! Operand pack provides multiple operands that can be associated with a
//! single return value of function argument. Sometimes this is necessary to
//! express an argument or return value that requires multiple registers, for
//! example 64-bit value in 32-bit mode or passing / returning homogeneous data
//! structures.
//! Operand pack provides multiple operands that can be associated with a single return value or function
//! argument. Sometimes this is necessary to express an argument or return value that requires multiple
//! registers, for example a 64-bit value in 32-bit mode or passing / returning homogeneous data structures.
struct OperandPack {
//! Operands.
Operand_ _data[Globals::kMaxValuePack];
@@ -616,6 +577,9 @@ public:
}
};
//! \name Members
//! \{
//! Function detail.
FuncDetail _funcDetail;
//! Function return value(s).
@@ -623,18 +587,20 @@ public:
//! Function arguments.
OperandPack* _args;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new `InvokeNode` instance.
inline InvokeNode(BaseBuilder* cb, uint32_t instId, uint32_t options) noexcept
inline InvokeNode(BaseBuilder* cb, InstId instId, InstOptions options) noexcept
: InstNode(cb, instId, options, kBaseOpCapacity),
_funcDetail(),
_args(nullptr) {
setType(kNodeInvoke);
setType(NodeType::kInvoke);
_resetOps();
_rets.reset();
addFlags(kFlagIsRemovable);
addFlags(NodeFlags::kIsRemovable);
}
//! \}
@@ -718,10 +684,6 @@ public:
//! \}
};
// ============================================================================
// [asmjit::FuncPass]
// ============================================================================
//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
class ASMJIT_VIRTAPI FuncPass : public Pass {
public:
@@ -743,7 +705,7 @@ public:
//! \}
//! \name Run
//! \name Pass Interface
//! \{
//! Calls `runOnFunction()` on each `FuncNode` node found.
@@ -755,6 +717,18 @@ public:
//! \}
};
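A sketch of a custom function pass built on this interface; the pass name and body are hypothetical, and registration is assumed to go through BaseCompiler::addPassT<MyPass>() as used elsewhere in this diff:

class MyPass : public FuncPass {
public:
  MyPass() noexcept : FuncPass("MyPass") {}

  Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func) override {
    DebugUtils::unused(zone, logger);
    // Inspect or transform nodes between `func` and `func->endNode()` here.
    return kErrorOk;
  }
};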
#if !defined(ASMJIT_NO_DEPRECATED)
inline Error BaseCompiler::_setArg(size_t argIndex, size_t valueIndex, const BaseReg& reg) {
FuncNode* func = _func;
if (ASMJIT_UNLIKELY(!func))
return reportError(DebugUtils::errored(kErrorInvalidState));
func->setArg(argIndex, valueIndex, reg);
return kErrorOk;
}
#endif
//! \}
ASMJIT_END_NAMESPACE

View File

@@ -1,63 +1,41 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#include "../core/api-config.h"
#include "../core/operand.h"
#include "../core/type.h"
#include "../core/zonestring.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [Forward Declarations]
// ============================================================================
class RAWorkReg;
//! \addtogroup asmjit_compiler
//! \{
// ============================================================================
// [asmjit::VirtReg]
// ============================================================================
//! Virtual register data, managed by \ref BaseCompiler.
class VirtReg {
public:
ASMJIT_NONCOPYABLE(VirtReg)
//! \name Members
//! \{
//! Virtual register signature.
OperandSignature _signature {};
//! Virtual register id.
uint32_t _id = 0;
//! Virtual register info (signature).
RegInfo _info = {};
//! Virtual register size (can be smaller than `regInfo._size`).
//! Virtual register size (can be smaller than `_signature._size`).
uint32_t _virtSize = 0;
//! Virtual register alignment (for spilling).
uint8_t _alignment = 0;
//! Type-id.
uint8_t _typeId = 0;
TypeId _typeId = TypeId::kVoid;
//! Virtual register weight for alloc/spill decisions.
uint8_t _weight = 1;
//! True if this is a fixed register, never reallocated.
@@ -69,24 +47,23 @@ public:
//! Virtual register name (user provided or automatically generated).
ZoneString<16> _name {};
// -------------------------------------------------------------------------
// The following members are used exclusively by RAPass. They are initialized
// when the VirtReg is created to NULL pointers and then changed during RAPass
// execution. RAPass sets them back to NULL before it returns.
// -------------------------------------------------------------------------
// The following members are used exclusively by RAPass. They are initialized to null pointers when the VirtReg
// is created and then changed during RAPass execution. RAPass sets them back to null before it returns.
//! Reference to `RAWorkReg`, used during register allocation.
RAWorkReg* _workReg = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
inline VirtReg(uint32_t id, uint32_t signature, uint32_t virtSize, uint32_t alignment, uint32_t typeId) noexcept
: _id(id),
_info { signature },
inline VirtReg(const OperandSignature& signature, uint32_t id, uint32_t virtSize, uint32_t alignment, TypeId typeId) noexcept
: _signature(signature),
_id(id),
_virtSize(virtSize),
_alignment(uint8_t(alignment)),
_typeId(uint8_t(typeId)),
_typeId(typeId),
_isFixed(false),
_isStack(false),
_reserved(0) {}
@@ -104,56 +81,50 @@ public:
//! Returns the size of the virtual register name.
inline uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns a register information that wraps the register signature.
inline const RegInfo& info() const noexcept { return _info; }
//! Returns a register signature of this virtual register.
inline OperandSignature signature() const noexcept { return _signature; }
//! Returns a virtual register type (maps to the physical register type as well).
inline uint32_t type() const noexcept { return _info.type(); }
inline RegType type() const noexcept { return _signature.regType(); }
//! Returns a virtual register group (maps to the physical register group as well).
inline uint32_t group() const noexcept { return _info.group(); }
inline RegGroup group() const noexcept { return _signature.regGroup(); }
//! Returns a real size of the register this virtual register maps to.
//!
//! For example if this is a 128-bit SIMD register used for a scalar single
//! precision floating point value then its virtSize would be 4, however, the
//! `regSize` would still say 16 (128-bits), because it's the smallest size
//! For example, if this is a 128-bit SIMD register used for a scalar single-precision floating point value, its
//! virtSize would be 4; however, `regSize` would still report 16 (128 bits), because that's the smallest size
//! of that register type.
inline uint32_t regSize() const noexcept { return _info.size(); }
//! Returns a register signature of this virtual register.
inline uint32_t signature() const noexcept { return _info.signature(); }
inline uint32_t regSize() const noexcept { return _signature.size(); }
//! Returns the virtual register size.
//!
//! The virtual register size describes how many bytes the virtual register
//! needs to store its content. It can be smaller than the physical register
//! size, see `regSize()`.
//! The virtual register size describes how many bytes the virtual register needs to store its content. It can be
//! smaller than the physical register size, see `regSize()`.
inline uint32_t virtSize() const noexcept { return _virtSize; }
//! Returns the virtual register alignment.
inline uint32_t alignment() const noexcept { return _alignment; }
//! Returns the virtual register type id, see `Type::Id`.
inline uint32_t typeId() const noexcept { return _typeId; }
//! Returns the virtual register type id.
inline TypeId typeId() const noexcept { return _typeId; }
//! Returns the virtual register weight - the register allocator can use it
//! as explicit hint for alloc/spill decisions.
//! Returns the virtual register weight - the register allocator can use it as an explicit hint for alloc/spill
//! decisions.
inline uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can
//! use it as explicit hint for alloc/spill decisions and initial bin-packing.
//! Sets the virtual register weight (0 to 255) - the register allocator can use it as an explicit hint for
//! alloc/spill decisions and initial bin-packing.
inline void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
//! Returns whether the virtual register is always allocated to a fixed
//! physical register (and never reallocated).
//! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated).
//!
//! \note This is only used for special purposes and it's mostly internal.
inline bool isFixed() const noexcept { return bool(_isFixed); }
//! Returns whether the virtual register is indeed a stack that only uses
//! the virtual register id for making it accessible.
//! Tests whether the virtual register is in fact a stack that only uses the virtual register id.
//!
//! \note It's an error if a stack is accessed as a register.
inline bool isStack() const noexcept { return bool(_isStack); }
//! Tests whether the virtual register has an associated `RAWorkReg` at the moment.
inline bool hasWorkReg() const noexcept { return _workReg != nullptr; }
inline RAWorkReg* workReg() const noexcept { return _workReg; }
inline void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/constpool.h"
@@ -27,16 +9,14 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ConstPool - Construction / Destruction]
// ============================================================================
// ConstPool - Construction & Destruction
// ======================================
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
ConstPool::~ConstPool() noexcept {}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
// ConstPool - Reset
// =================
void ConstPool::reset(Zone* zone) noexcept {
_zone = zone;
@@ -55,11 +35,10 @@ void ConstPool::reset(Zone* zone) noexcept {
_minItemSize = 0;
}
// ============================================================================
// [asmjit::ConstPool - Ops]
// ============================================================================
// ConstPool - Operations
// ======================
static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
if (!gap)
return self->_zone->allocT<ConstPool::Gap>();
@@ -68,7 +47,7 @@ static ASMJIT_INLINE ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcep
return gap;
}
static ASMJIT_INLINE void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
static inline void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
}
@@ -80,7 +59,11 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce
size_t gapIndex;
size_t gapSize;
if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
if (size >= 32 && Support::isAligned<size_t>(offset, 32)) {
gapIndex = ConstPool::kIndex32;
gapSize = 32;
}
else if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapSize = 16;
}
@@ -101,9 +84,8 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce
gapSize = 1;
}
// We don't have to check for errors here, if this failed nothing really
// happened (just the gap won't be visible) and it will fail again at
// place where the same check would generate `kErrorOutOfMemory` error.
// We don't have to check for errors here; if this failed nothing really happened (just the gap won't be
// visible) and it will fail again at the place where the same check would generate a `kErrorOutOfMemory` error.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap)
return;
@@ -122,7 +104,9 @@ static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexce
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
size_t treeIndex;
if (size == 32)
if (size == 64)
treeIndex = kIndex64;
else if (size == 32)
treeIndex = kIndex32;
else if (size == 16)
treeIndex = kIndex16;
@@ -143,8 +127,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
return kErrorOk;
}
// Before incrementing the current offset try if there is a gap that can
// be used for the requested data.
// Before incrementing the current offset, check whether there is a gap that can be used for the requested data.
size_t offset = ~size_t(0);
size_t gapIndex = treeIndex;
@@ -172,8 +155,7 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
}
if (offset == ~size_t(0)) {
// Get how many bytes have to be skipped so the address is aligned accordingly
// to the 'size'.
// Get how many bytes have to be skipped so the address is aligned according to the 'size'.
size_t diff = Support::alignUpDiff<size_t>(_size, size);
if (diff != 0) {
@@ -195,9 +177,8 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
dstOffset = offset;
// Now create a bunch of shared constants that are based on the data pattern.
// We stop at size 4, it probably doesn't make sense to split constants down
// to 1 byte.
// Now create a bunch of shared constants that are based on the data pattern. We stop at size 4, as it
// probably doesn't make sense to split constants down to 1 byte.
size_t pCount = 1;
size_t smallerSize = size;
@@ -226,9 +207,8 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
return kErrorOk;
}
// ============================================================================
// [asmjit::ConstPool - Reset]
// ============================================================================
// ConstPool - Reset
// =================
struct ConstPoolFill {
inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
@@ -255,9 +235,8 @@ void ConstPool::fill(void* dst) const noexcept {
}
}
// ============================================================================
// [asmjit::ConstPool - Unit]
// ============================================================================
// ConstPool - Tests
// =================
#if defined(ASMJIT_TEST)
UNIT(const_pool) {

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
@@ -33,23 +15,22 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::ConstPool]
// ============================================================================
//! Constant pool scope.
enum class ConstPoolScope : uint32_t {
//! Local constant, always embedded right after the current function.
kLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kGlobal = 1,
//! Maximum value of `ConstPoolScope`.
kMaxValue = kGlobal
};
//! Constant pool.
class ConstPool {
public:
ASMJIT_NONCOPYABLE(ConstPool)
//! Constant pool scope.
enum Scope : uint32_t {
//! Local constant, always embedded right after the current function.
kScopeLocal = 0,
//! Global constant, embedded at the end of the currently compiled code.
kScopeGlobal = 1
};
//! \cond INTERNAL
//! Index of a given size in const-pool table.
@@ -60,7 +41,8 @@ public:
kIndex8 = 3,
kIndex16 = 4,
kIndex32 = 5,
kIndexCount = 6
kIndex64 = 6,
kIndexCount = 7
};
//! Zone-allocated const-pool gap created by two differently aligned constants.
@@ -193,6 +175,9 @@ public:
//! \endcond
//! \name Members
//! \{
//! Zone allocator.
Zone* _zone;
//! Tree per size.
@@ -209,6 +194,8 @@ public:
//! Minimum item size in the pool.
size_t _minItemSize;
//! \}
//! \name Construction & Destruction
//! \{
@@ -238,21 +225,18 @@ public:
//! Adds a constant to the constant pool.
//!
//! The constant must have known size, which is 1, 2, 4, 8, 16 or 32 bytes.
//! The constant is added to the pool only if it doesn't not exist, otherwise
//! cached value is returned.
//! The constant must have a known size, which is 1, 2, 4, 8, 16 or 32 bytes. The constant is added to the pool only
//! if it doesn't already exist, otherwise a cached value is returned.
//!
//! AsmJit is able to subdivide added constants, so for example if you add
//! 8-byte constant 0x1122334455667788 it will create the following slots:
//! AsmJit is able to subdivide added constants, so for example if you add 8-byte constant 0x1122334455667788 it
//! will create the following slots:
//!
//! 8-byte: 0x1122334455667788
//! 4-byte: 0x11223344, 0x55667788
//!
//! The reason is that when combining MMX/SSE/AVX code some patterns are used
//! frequently. However, AsmJit is not able to reallocate a constant that has
//! been already added. For example if you try to add 4-byte constant and then
//! 8-byte constant having the same 4-byte pattern as the previous one, two
//! independent slots will be generated by the pool.
//! The reason is that when combining MMX/SSE/AVX code some patterns are used frequently. However, AsmJit is not
//! able to reallocate a constant that has already been added. For example if you try to add a 4-byte constant and
//! then an 8-byte constant having the same 4-byte pattern as the previous one, two independent slots will be used.
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
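A minimal sketch of using ConstPool directly (it is normally driven by the Compiler's constant helpers); a Zone is assumed for allocation:

Zone zone(1024);               // Arena used by the pool's internal trees.
ConstPool pool(&zone);

uint64_t value = 0x1122334455667788u;
size_t offset;
Error err = pool.add(&value, sizeof(value), offset);  // Deduplicated by content.

// `offset` is the constant's position inside the pool; pool.fill(dst) copies the
// whole pool into a buffer of at least pool.size() bytes.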
//! Fills the destination with the content of this constant pool.

File diff suppressed because it is too large

View File

@@ -1,52 +1,679 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_CPUINFO_H_INCLUDED
#define ASMJIT_CORE_CPUINFO_H_INCLUDED
#include "../core/archtraits.h"
#include "../core/features.h"
#include "../core/environment.h"
#include "../core/globals.h"
#include "../core/string.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CpuInfo]
// ============================================================================
//! CPU features information.
//!
//! Each feature is represented by a single bit in an embedded bit array.
class CpuFeatures {
public:
//! A word that is used to represent feature bits.
typedef Support::BitWord BitWord;
//! Iterator that can iterate over all set CPU features.
typedef Support::BitVectorIterator<BitWord> Iterator;
//! \name Constants
//! \{
//! \cond INTERNAL
enum : uint32_t {
kMaxFeatures = 256,
kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
};
//! \endcond
//! \}
//! \name Data
//! \{
//! CPU features data.
struct Data {
//! \name Members
//! \{
//! Data bits.
Support::Array<BitWord, kNumBitWords> _bits;
//! \}
//! \name Overloaded Operators
//! \{
inline bool operator==(const Data& other) noexcept { return eq(other); }
inline bool operator!=(const Data& other) noexcept { return !eq(other); }
//! \}
//! \name Accessors
//! \{
//! Returns true if there are no features set.
inline bool empty() const noexcept { return _bits.aggregate<Support::Or>(0) == 0; }
//! Returns all features as array of bitwords (see \ref Support::BitWord).
inline BitWord* bits() noexcept { return _bits.data(); }
//! Returns all features as array of bitwords (const).
inline const BitWord* bits() const noexcept { return _bits.data(); }
//! Returns the number of BitWords returned by \ref bits().
inline size_t bitWordCount() const noexcept { return kNumBitWords; }
//! Returns \ref Support::BitVectorIterator, that can be used to iterate over all features efficiently.
inline Iterator iterator() const noexcept { return Iterator(_bits.data(), kNumBitWords); }
//! Tests whether the feature `featureId` is present.
template<typename FeatureId>
ASMJIT_FORCE_INLINE bool has(const FeatureId& featureId) const noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
uint32_t bit = uint32_t(featureId) % Support::kBitWordSizeInBits;
return bool((_bits[idx] >> bit) & 0x1);
}
//! Tests whether all features as defined by `other` are present.
ASMJIT_FORCE_INLINE bool hasAll(const Data& other) const noexcept {
for (uint32_t i = 0; i < kNumBitWords; i++)
if ((_bits[i] & other._bits[i]) != other._bits[i])
return false;
return true;
}
//! \}
//! \name Manipulation
//! \{
inline void reset() noexcept { _bits.fill(0); }
//! Adds the given CPU `featureId` to the list of features.
template<typename FeatureId>
ASMJIT_FORCE_INLINE void add(const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
uint32_t bit = uint32_t(featureId) % Support::kBitWordSizeInBits;
_bits[idx] |= BitWord(1) << bit;
}
template<typename FeatureId, typename... Args>
ASMJIT_FORCE_INLINE void add(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
add(featureId);
add(std::forward<Args>(otherFeatureIds)...);
}
template<typename FeatureId>
ASMJIT_FORCE_INLINE void addIf(bool condition, const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
uint32_t bit = uint32_t(featureId) % Support::kBitWordSizeInBits;
_bits[idx] |= BitWord(condition) << bit;
}
template<typename FeatureId, typename... Args>
ASMJIT_FORCE_INLINE void addIf(bool condition, const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
addIf(condition, featureId);
addIf(condition, std::forward<Args>(otherFeatureIds)...);
}
//! Removes the given CPU `featureId` from the list of features.
template<typename FeatureId>
ASMJIT_FORCE_INLINE void remove(const FeatureId& featureId) noexcept {
ASMJIT_ASSERT(uint32_t(featureId) < kMaxFeatures);
uint32_t idx = uint32_t(featureId) / Support::kBitWordSizeInBits;
uint32_t bit = uint32_t(featureId) % Support::kBitWordSizeInBits;
_bits[idx] &= ~(BitWord(1) << bit);
}
template<typename FeatureId, typename... Args>
ASMJIT_FORCE_INLINE void remove(const FeatureId& featureId, Args&&... otherFeatureIds) noexcept {
remove(featureId);
remove(std::forward<Args>(otherFeatureIds)...);
}
//! Tests whether this CPU features data matches `other`.
ASMJIT_FORCE_INLINE bool eq(const Data& other) const noexcept { return _bits == other._bits; }
//! \}
};
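A hedged sketch of querying these bits at runtime; it assumes CpuInfo::host() for host detection, the x86() accessor returning the X86 feature view, and a hypothetical useAvx512Kernel() dispatch helper:

const CpuFeatures& features = CpuInfo::host().features();

bool hasAvx2 = features.x86().hasAVX2();                           // Convenience accessor (see the macro below).
bool hasAvx512 = features.x86().has(CpuFeatures::X86::kAVX512_F);  // Generic bit query via Data::has().

if (hasAvx512)
  useAvx512Kernel();   // Hypothetical dispatch helper, not part of AsmJit.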
//! X86 specific features data.
struct X86 : public Data {
//! X86 CPU feature identifiers.
enum Id : uint8_t {
// @EnumValuesBegin{"enum": "CpuFeatures::X86"}@
kNone, //!< No feature (never set, used internally).
kMT, //!< CPU has multi-threading capabilities.
kNX, //!< CPU has Not-Execute-Bit aka DEP (data-execution prevention).
k3DNOW, //!< CPU has 3DNOW (3DNOW base instructions) [AMD].
k3DNOW2, //!< CPU has 3DNOW2 (enhanced 3DNOW) [AMD].
kADX, //!< CPU has ADX (multi-precision add-carry instruction extensions).
kAESNI, //!< CPU has AESNI (AES encode/decode instructions).
kALTMOVCR8, //!< CPU has LOCK MOV R<->CR0 (supports `MOV R<->CR8` via `LOCK MOV R<->CR0` in 32-bit mode) [AMD].
kAMX_BF16, //!< CPU has AMX_BF16 (advanced matrix extensions - BF16 instructions).
kAMX_INT8, //!< CPU has AMX_INT8 (advanced matrix extensions - INT8 instructions).
kAMX_TILE, //!< CPU has AMX_TILE (advanced matrix extensions).
kAVX, //!< CPU has AVX (advanced vector extensions).
kAVX2, //!< CPU has AVX2 (advanced vector extensions 2).
kAVX512_4FMAPS, //!< CPU has AVX512_FMAPS (FMA packed single).
kAVX512_4VNNIW, //!< CPU has AVX512_VNNIW (vector NN instructions word variable precision).
kAVX512_BF16, //!< CPU has AVX512_BF16 (BFLOAT16 support instruction).
kAVX512_BITALG, //!< CPU has AVX512_BITALG (VPOPCNT[B|W], VPSHUFBITQMB).
kAVX512_BW, //!< CPU has AVX512_BW (packed BYTE|WORD).
kAVX512_CDI, //!< CPU has AVX512_CDI (conflict detection).
kAVX512_DQ, //!< CPU has AVX512_DQ (packed DWORD|QWORD).
kAVX512_ERI, //!< CPU has AVX512_ERI (exponential and reciprocal).
kAVX512_F, //!< CPU has AVX512_F (AVX512 foundation).
kAVX512_FP16, //!< CPU has AVX512_FP16 (FP16 extensions).
kAVX512_IFMA, //!< CPU has AVX512_IFMA (integer fused-multiply-add using 52-bit precision).
kAVX512_PFI, //!< CPU has AVX512_PFI (prefetch instructions).
kAVX512_VBMI, //!< CPU has AVX512_VBMI (vector byte manipulation).
kAVX512_VBMI2, //!< CPU has AVX512_VBMI2 (vector byte manipulation 2).
kAVX512_VL, //!< CPU has AVX512_VL (vector length extensions).
kAVX512_VNNI, //!< CPU has AVX512_VNNI (vector neural network instructions).
kAVX512_VP2INTERSECT, //!< CPU has AVX512_VP2INTERSECT
kAVX512_VPOPCNTDQ, //!< CPU has AVX512_VPOPCNTDQ (VPOPCNT[D|Q] instructions).
kAVX_VNNI, //!< CPU has AVX_VNNI (VEX encoding of vpdpbusd/vpdpbusds/vpdpwssd/vpdpwssds).
kBMI, //!< CPU has BMI (bit manipulation instructions #1).
kBMI2, //!< CPU has BMI2 (bit manipulation instructions #2).
kCET_IBT, //!< CPU has CET-IBT (indirect branch tracking).
kCET_SS, //!< CPU has CET-SS.
kCLDEMOTE, //!< CPU has CLDEMOTE (cache line demote).
kCLFLUSH,             //!< CPU has CLFLUSH (Cache Line flush).
kCLFLUSHOPT,          //!< CPU has CLFLUSHOPT (Cache Line flush - optimized).
kCLWB, //!< CPU has CLWB.
kCLZERO, //!< CPU has CLZERO.
kCMOV, //!< CPU has CMOV (CMOV and FCMOV instructions).
kCMPXCHG16B, //!< CPU has CMPXCHG16B (compare-exchange 16 bytes) [X86_64].
kCMPXCHG8B, //!< CPU has CMPXCHG8B (compare-exchange 8 bytes).
kENCLV, //!< CPU has ENCLV.
kENQCMD, //!< CPU has ENQCMD (enqueue stores).
kERMS, //!< CPU has ERMS (enhanced REP MOVSB/STOSB).
kF16C, //!< CPU has F16C.
kFMA, //!< CPU has FMA (fused-multiply-add 3 operand form).
kFMA4, //!< CPU has FMA4 (fused-multiply-add 4 operand form).
kFPU, //!< CPU has FPU (FPU support).
kFSGSBASE, //!< CPU has FSGSBASE.
kFXSR, //!< CPU has FXSR (FXSAVE/FXRSTOR instructions).
kFXSROPT, //!< CPU has FXSROPT (FXSAVE/FXRSTOR is optimized).
kGEODE, //!< CPU has GEODE extensions (3DNOW additions).
kGFNI, //!< CPU has GFNI (Galois field instructions).
kHLE, //!< CPU has HLE.
kHRESET, //!< CPU has HRESET.
kI486, //!< CPU has I486 features (I486+ support).
kLAHFSAHF, //!< CPU has LAHF/SAHF (LAHF/SAHF in 64-bit mode) [X86_64].
kLWP, //!< CPU has LWP (lightweight profiling) [AMD].
kLZCNT, //!< CPU has LZCNT (LZCNT instruction).
kMCOMMIT, //!< CPU has MCOMMIT (MCOMMIT instruction).
kMMX, //!< CPU has MMX (MMX base instructions).
kMMX2, //!< CPU has MMX2 (MMX extensions or MMX2).
kMONITOR, //!< CPU has MONITOR (MONITOR/MWAIT instructions).
kMONITORX, //!< CPU has MONITORX (MONITORX/MWAITX instructions).
kMOVBE, //!< CPU has MOVBE (move with byte-order swap).
kMOVDIR64B, //!< CPU has MOVDIR64B (move 64 bytes as direct store).
kMOVDIRI, //!< CPU has MOVDIRI (move dword/qword as direct store).
kMPX, //!< CPU has MPX (memory protection extensions).
kMSR, //!< CPU has MSR (RDMSR/WRMSR instructions).
kMSSE, //!< CPU has MSSE (misaligned SSE support).
kOSXSAVE, //!< CPU has OSXSAVE (XSAVE enabled by OS).
kOSPKE, //!< CPU has OSPKE (PKE enabled by OS).
kPCLMULQDQ, //!< CPU has PCLMULQDQ (packed carry-less multiplication).
kPCONFIG, //!< CPU has PCONFIG (PCONFIG instruction).
kPOPCNT, //!< CPU has POPCNT (POPCNT instruction).
kPREFETCHW, //!< CPU has PREFETCHW.
kPREFETCHWT1, //!< CPU has PREFETCHWT1.
kPTWRITE, //!< CPU has PTWRITE.
kRDPID, //!< CPU has RDPID.
kRDPRU, //!< CPU has RDPRU.
kRDRAND, //!< CPU has RDRAND.
kRDSEED, //!< CPU has RDSEED.
kRDTSC, //!< CPU has RDTSC.
kRDTSCP, //!< CPU has RDTSCP.
kRTM, //!< CPU has RTM.
kSERIALIZE, //!< CPU has SERIALIZE.
kSHA, //!< CPU has SHA (SHA-1 and SHA-256 instructions).
kSKINIT, //!< CPU has SKINIT (SKINIT/STGI instructions) [AMD].
kSMAP, //!< CPU has SMAP (supervisor-mode access prevention).
kSMEP, //!< CPU has SMEP (supervisor-mode execution prevention).
kSMX, //!< CPU has SMX (safer mode extensions).
kSNP, //!< CPU has SNP.
kSSE, //!< CPU has SSE.
kSSE2, //!< CPU has SSE2.
kSSE3, //!< CPU has SSE3.
kSSE4_1, //!< CPU has SSE4.1.
kSSE4_2, //!< CPU has SSE4.2.
kSSE4A, //!< CPU has SSE4A [AMD].
kSSSE3, //!< CPU has SSSE3.
kSVM, //!< CPU has SVM (virtualization) [AMD].
kTBM, //!< CPU has TBM (trailing bit manipulation) [AMD].
kTSX, //!< CPU has TSX.
kTSXLDTRK, //!< CPU has TSXLDTRK.
kUINTR, //!< CPU has UINTR (user interrupts).
kVAES, //!< CPU has VAES (vector AES 256|512 bit support).
kVMX, //!< CPU has VMX (virtualization) [INTEL].
kVPCLMULQDQ, //!< CPU has VPCLMULQDQ (vector PCLMULQDQ 256|512-bit support).
kWAITPKG, //!< CPU has WAITPKG (UMONITOR, UMWAIT, TPAUSE).
kWBNOINVD, //!< CPU has WBNOINVD.
kXOP, //!< CPU has XOP (XOP instructions) [AMD].
kXSAVE, //!< CPU has XSAVE.
kXSAVEC, //!< CPU has XSAVEC.
kXSAVEOPT, //!< CPU has XSAVEOPT.
kXSAVES, //!< CPU has XSAVES.
// @EnumValuesEnd@
kMaxValue = kXSAVES
};
#define ASMJIT_X86_FEATURE(FEATURE) \
inline bool has##FEATURE() const noexcept { return has(X86::k##FEATURE); }
ASMJIT_X86_FEATURE(MT)
ASMJIT_X86_FEATURE(NX)
ASMJIT_X86_FEATURE(3DNOW)
ASMJIT_X86_FEATURE(3DNOW2)
ASMJIT_X86_FEATURE(ADX)
ASMJIT_X86_FEATURE(AESNI)
ASMJIT_X86_FEATURE(ALTMOVCR8)
ASMJIT_X86_FEATURE(AMX_BF16)
ASMJIT_X86_FEATURE(AMX_INT8)
ASMJIT_X86_FEATURE(AMX_TILE)
ASMJIT_X86_FEATURE(AVX)
ASMJIT_X86_FEATURE(AVX2)
ASMJIT_X86_FEATURE(AVX512_4FMAPS)
ASMJIT_X86_FEATURE(AVX512_4VNNIW)
ASMJIT_X86_FEATURE(AVX512_BF16)
ASMJIT_X86_FEATURE(AVX512_BITALG)
ASMJIT_X86_FEATURE(AVX512_BW)
ASMJIT_X86_FEATURE(AVX512_CDI)
ASMJIT_X86_FEATURE(AVX512_DQ)
ASMJIT_X86_FEATURE(AVX512_ERI)
ASMJIT_X86_FEATURE(AVX512_F)
ASMJIT_X86_FEATURE(AVX512_FP16)
ASMJIT_X86_FEATURE(AVX512_IFMA)
ASMJIT_X86_FEATURE(AVX512_PFI)
ASMJIT_X86_FEATURE(AVX512_VBMI)
ASMJIT_X86_FEATURE(AVX512_VBMI2)
ASMJIT_X86_FEATURE(AVX512_VL)
ASMJIT_X86_FEATURE(AVX512_VNNI)
ASMJIT_X86_FEATURE(AVX512_VP2INTERSECT)
ASMJIT_X86_FEATURE(AVX512_VPOPCNTDQ)
ASMJIT_X86_FEATURE(AVX_VNNI)
ASMJIT_X86_FEATURE(BMI)
ASMJIT_X86_FEATURE(BMI2)
ASMJIT_X86_FEATURE(CET_IBT)
ASMJIT_X86_FEATURE(CET_SS)
ASMJIT_X86_FEATURE(CLDEMOTE)
ASMJIT_X86_FEATURE(CLFLUSH)
ASMJIT_X86_FEATURE(CLFLUSHOPT)
ASMJIT_X86_FEATURE(CLWB)
ASMJIT_X86_FEATURE(CLZERO)
ASMJIT_X86_FEATURE(CMOV)
ASMJIT_X86_FEATURE(CMPXCHG16B)
ASMJIT_X86_FEATURE(CMPXCHG8B)
ASMJIT_X86_FEATURE(ENCLV)
ASMJIT_X86_FEATURE(ENQCMD)
ASMJIT_X86_FEATURE(ERMS)
ASMJIT_X86_FEATURE(F16C)
ASMJIT_X86_FEATURE(FMA)
ASMJIT_X86_FEATURE(FMA4)
ASMJIT_X86_FEATURE(FPU)
ASMJIT_X86_FEATURE(FSGSBASE)
ASMJIT_X86_FEATURE(FXSR)
ASMJIT_X86_FEATURE(FXSROPT)
ASMJIT_X86_FEATURE(GEODE)
ASMJIT_X86_FEATURE(GFNI)
ASMJIT_X86_FEATURE(HLE)
ASMJIT_X86_FEATURE(HRESET)
ASMJIT_X86_FEATURE(I486)
ASMJIT_X86_FEATURE(LAHFSAHF)
ASMJIT_X86_FEATURE(LWP)
ASMJIT_X86_FEATURE(LZCNT)
ASMJIT_X86_FEATURE(MCOMMIT)
ASMJIT_X86_FEATURE(MMX)
ASMJIT_X86_FEATURE(MMX2)
ASMJIT_X86_FEATURE(MONITOR)
ASMJIT_X86_FEATURE(MONITORX)
ASMJIT_X86_FEATURE(MOVBE)
ASMJIT_X86_FEATURE(MOVDIR64B)
ASMJIT_X86_FEATURE(MOVDIRI)
ASMJIT_X86_FEATURE(MPX)
ASMJIT_X86_FEATURE(MSR)
ASMJIT_X86_FEATURE(MSSE)
ASMJIT_X86_FEATURE(OSXSAVE)
ASMJIT_X86_FEATURE(OSPKE)
ASMJIT_X86_FEATURE(PCLMULQDQ)
ASMJIT_X86_FEATURE(PCONFIG)
ASMJIT_X86_FEATURE(POPCNT)
ASMJIT_X86_FEATURE(PREFETCHW)
ASMJIT_X86_FEATURE(PREFETCHWT1)
ASMJIT_X86_FEATURE(PTWRITE)
ASMJIT_X86_FEATURE(RDPID)
ASMJIT_X86_FEATURE(RDPRU)
ASMJIT_X86_FEATURE(RDRAND)
ASMJIT_X86_FEATURE(RDSEED)
ASMJIT_X86_FEATURE(RDTSC)
ASMJIT_X86_FEATURE(RDTSCP)
ASMJIT_X86_FEATURE(RTM)
ASMJIT_X86_FEATURE(SERIALIZE)
ASMJIT_X86_FEATURE(SHA)
ASMJIT_X86_FEATURE(SKINIT)
ASMJIT_X86_FEATURE(SMAP)
ASMJIT_X86_FEATURE(SMEP)
ASMJIT_X86_FEATURE(SMX)
ASMJIT_X86_FEATURE(SNP)
ASMJIT_X86_FEATURE(SSE)
ASMJIT_X86_FEATURE(SSE2)
ASMJIT_X86_FEATURE(SSE3)
ASMJIT_X86_FEATURE(SSE4_1)
ASMJIT_X86_FEATURE(SSE4_2)
ASMJIT_X86_FEATURE(SSE4A)
ASMJIT_X86_FEATURE(SSSE3)
ASMJIT_X86_FEATURE(SVM)
ASMJIT_X86_FEATURE(TBM)
ASMJIT_X86_FEATURE(TSX)
ASMJIT_X86_FEATURE(TSXLDTRK)
ASMJIT_X86_FEATURE(UINTR)
ASMJIT_X86_FEATURE(VAES)
ASMJIT_X86_FEATURE(VMX)
ASMJIT_X86_FEATURE(VPCLMULQDQ)
ASMJIT_X86_FEATURE(WAITPKG)
ASMJIT_X86_FEATURE(WBNOINVD)
ASMJIT_X86_FEATURE(XOP)
ASMJIT_X86_FEATURE(XSAVE)
ASMJIT_X86_FEATURE(XSAVEC)
ASMJIT_X86_FEATURE(XSAVEOPT)
ASMJIT_X86_FEATURE(XSAVES)
#undef ASMJIT_X86_FEATURE
};
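A minimal usage sketch (editorial illustration, not part of the header): the has<FEATURE>() accessors generated by ASMJIT_X86_FEATURE above combine naturally with CpuInfo::host(), which is declared later in this file.
#include <asmjit/core.h>
static bool hostSupportsAvx512Fp16() noexcept {
  // x86() casts the generic CpuFeatures::Data to the X86-specific view declared above.
  const asmjit::CpuFeatures::X86& f = asmjit::CpuInfo::host().features().x86();
  return f.hasAVX512_F() && f.hasAVX512_FP16();
}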
//! ARM specific features data.
struct ARM : public Data {
//! ARM CPU feature identifiers.
enum Id : uint8_t {
// @EnumValuesBegin{"enum": "CpuFeatures::ARM"}@
kNone = 0, //!< No feature (never set, used internally).
kTHUMB, //!< THUMB v1 ISA.
kTHUMBv2, //!< THUMB v2 ISA.
kARMv6, //!< ARMv6 ISA.
kARMv7, //!< ARMv7 ISA.
kARMv8a, //!< ARMv8-A ISA.
kARMv8_1a, //!< ARMv8.1-A ISA.
kARMv8_2a, //!< ARMv8.2-A ISA.
kARMv8_3a, //!< ARMv8.3-A ISA.
kARMv8_4a, //!< ARMv8.4-A ISA.
kARMv8_5a, //!< ARMv8.5-A ISA.
kARMv8_6a, //!< ARMv8.6-A ISA.
kARMv8_7a, //!< ARMv8.7-A ISA.
kVFPv2, //!< CPU has VFPv2 instruction set.
kVFPv3, //!< CPU has VFPv3 instruction set.
kVFPv4, //!< CPU has VFPv4 instruction set.
kVFP_D32, //!< CPU has 32 VFP-D (64-bit) registers.
kAES, //!< CPU has AES (AArch64 only).
kALTNZCV, //!< CPU has ALTNZCV (AArch64 only).
kASIMD, //!< CPU has Advanced SIMD (NEON on ARM/THUMB).
kBF16, //!< CPU has BF16 (AArch64 only).
kBTI, //!< CPU has BTI (branch target identification).
kCPUID, //!< CPU has accessible CPUID register (ID_AA64ZFR0_EL1).
kCRC32, //!< CPU has CRC32 instructions.
kDGH, //!< CPU has DGH (AArch64 only).
kDIT, //!< CPU has data independent timing instructions (DIT).
kDOTPROD, //!< CPU has DOTPROD (SDOT/UDOT).
kEDSP, //!< CPU has EDSP (ARM/THUMB only).
kFCMA, //!< CPU has FCMA (FCADD/FCMLA).
kFJCVTZS, //!< CPU has FJCVTZS (AArch64 only).
kFLAGM, //!< CPU has FLAGM (AArch64 only).
kFP16CONV, //!< CPU has FP16 (half-float) conversion.
kFP16FML, //!< CPU has FMLAL{2}/FMLSL{2} instructions.
kFP16FULL, //!< CPU has full support for FP16.
kFRINT, //!< CPU has FRINT[32|64][X|Z] (AArch64 only).
kI8MM, //!< CPU has I8MM (AArch64 only).
kIDIVA, //!< CPU has hardware SDIV and UDIV (ARM mode).
kIDIVT, //!< CPU has hardware SDIV and UDIV (THUMB mode).
kLSE, //!< CPU has large system extensions (LSE) (AArch64 only).
kMTE, //!< CPU has MTE (AArch64 only).
kRCPC_IMMO, //!< CPU has RCPC_IMMO (AArch64 only).
kRDM, //!< CPU has RDM (AArch64 only).
kPMU, //!< CPU has PMU (AArch64 only).
kPMULL, //!< CPU has PMULL (AArch64 only).
kRNG, //!< CPU has random number generation (RNG).
kSB, //!< CPU has speculative barrier SB (AArch64 only).
kSHA1, //!< CPU has SHA1.
kSHA2, //!< CPU has SHA2.
kSHA3, //!< CPU has SHA3.
kSHA512, //!< CPU has SHA512.
kSM3, //!< CPU has SM3.
kSM4, //!< CPU has SM4.
kSSBS, //!< CPU has SSBS.
kSVE, //!< CPU has SVE (AArch64 only).
kSVE_BF16, //!< CPU has SVE-BF16 (AArch64 only).
kSVE_F32MM, //!< CPU has SVE-F32MM (AArch64 only).
kSVE_F64MM, //!< CPU has SVE-F64MM (AArch64 only).
kSVE_I8MM, //!< CPU has SVE-I8MM (AArch64 only).
kSVE_PMULL, //!< CPU has SVE-PMULL (AArch64 only).
kSVE2, //!< CPU has SVE2 (AArch64 only).
kSVE2_AES, //!< CPU has SVE2-AES (AArch64 only).
kSVE2_BITPERM, //!< CPU has SVE2-BITPERM (AArch64 only).
kSVE2_SHA3, //!< CPU has SVE2-SHA3 (AArch64 only).
kSVE2_SM4, //!< CPU has SVE2-SM4 (AArch64 only).
kTME, //!< CPU has transactional memory extensions (TME).
// @EnumValuesEnd@
kMaxValue = kTME
};
#define ASMJIT_ARM_FEATURE(FEATURE) \
inline bool has##FEATURE() const noexcept { return has(ARM::k##FEATURE); }
ASMJIT_ARM_FEATURE(THUMB)
ASMJIT_ARM_FEATURE(THUMBv2)
ASMJIT_ARM_FEATURE(ARMv6)
ASMJIT_ARM_FEATURE(ARMv7)
ASMJIT_ARM_FEATURE(ARMv8a)
ASMJIT_ARM_FEATURE(ARMv8_1a)
ASMJIT_ARM_FEATURE(ARMv8_2a)
ASMJIT_ARM_FEATURE(ARMv8_3a)
ASMJIT_ARM_FEATURE(ARMv8_4a)
ASMJIT_ARM_FEATURE(ARMv8_5a)
ASMJIT_ARM_FEATURE(ARMv8_6a)
ASMJIT_ARM_FEATURE(ARMv8_7a)
ASMJIT_ARM_FEATURE(VFPv2)
ASMJIT_ARM_FEATURE(VFPv3)
ASMJIT_ARM_FEATURE(VFPv4)
ASMJIT_ARM_FEATURE(VFP_D32)
ASMJIT_ARM_FEATURE(AES)
ASMJIT_ARM_FEATURE(ALTNZCV)
ASMJIT_ARM_FEATURE(ASIMD)
ASMJIT_ARM_FEATURE(BF16)
ASMJIT_ARM_FEATURE(BTI)
ASMJIT_ARM_FEATURE(CPUID)
ASMJIT_ARM_FEATURE(CRC32)
ASMJIT_ARM_FEATURE(DGH)
ASMJIT_ARM_FEATURE(DIT)
ASMJIT_ARM_FEATURE(DOTPROD)
ASMJIT_ARM_FEATURE(EDSP)
ASMJIT_ARM_FEATURE(FCMA)
ASMJIT_ARM_FEATURE(FLAGM)
ASMJIT_ARM_FEATURE(FP16CONV)
ASMJIT_ARM_FEATURE(FP16FML)
ASMJIT_ARM_FEATURE(FP16FULL)
ASMJIT_ARM_FEATURE(FRINT)
ASMJIT_ARM_FEATURE(IDIVA)
ASMJIT_ARM_FEATURE(IDIVT)
ASMJIT_ARM_FEATURE(LSE)
ASMJIT_ARM_FEATURE(MTE)
ASMJIT_ARM_FEATURE(FJCVTZS)
ASMJIT_ARM_FEATURE(I8MM)
ASMJIT_ARM_FEATURE(RCPC_IMMO)
ASMJIT_ARM_FEATURE(RDM)
ASMJIT_ARM_FEATURE(PMU)
ASMJIT_ARM_FEATURE(PMULL)
ASMJIT_ARM_FEATURE(RNG)
ASMJIT_ARM_FEATURE(SB)
ASMJIT_ARM_FEATURE(SHA1)
ASMJIT_ARM_FEATURE(SHA2)
ASMJIT_ARM_FEATURE(SHA3)
ASMJIT_ARM_FEATURE(SHA512)
ASMJIT_ARM_FEATURE(SM3)
ASMJIT_ARM_FEATURE(SM4)
ASMJIT_ARM_FEATURE(SSBS)
ASMJIT_ARM_FEATURE(SVE)
ASMJIT_ARM_FEATURE(SVE_BF16)
ASMJIT_ARM_FEATURE(SVE_F32MM)
ASMJIT_ARM_FEATURE(SVE_F64MM)
ASMJIT_ARM_FEATURE(SVE_I8MM)
ASMJIT_ARM_FEATURE(SVE_PMULL)
ASMJIT_ARM_FEATURE(SVE2)
ASMJIT_ARM_FEATURE(SVE2_AES)
ASMJIT_ARM_FEATURE(SVE2_BITPERM)
ASMJIT_ARM_FEATURE(SVE2_SHA3)
ASMJIT_ARM_FEATURE(SVE2_SM4)
ASMJIT_ARM_FEATURE(TME)
#undef ASMJIT_ARM_FEATURE
};
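A similar illustrative sketch for the ARM view: a feature set can also be composed manually from the strong-typed identifiers, which is how detection code typically fills it.
#include <asmjit/core.h>
static asmjit::CpuFeatures makeBaselineAArch64Features() noexcept {
  asmjit::CpuFeatures features;
  // add() is variadic, so several features can be set in a single call.
  features.arm().add(asmjit::CpuFeatures::ARM::kASIMD,
                     asmjit::CpuFeatures::ARM::kAES,
                     asmjit::CpuFeatures::ARM::kCRC32);
  return features;
}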
static_assert(uint32_t(X86::kMaxValue) < kMaxFeatures, "The number of X86 CPU features cannot exceed CpuFeatures::kMaxFeatures");
static_assert(uint32_t(ARM::kMaxValue) < kMaxFeatures, "The number of ARM CPU features cannot exceed CpuFeatures::kMaxFeatures");
//! \}
//! \name Members
//! \{
Data _data {};
//! \}
//! \name Construction & Destruction
//! \{
inline CpuFeatures() noexcept {}
inline CpuFeatures(const CpuFeatures& other) noexcept = default;
inline explicit CpuFeatures(Globals::NoInit_) noexcept {}
//! \}
//! \name Overloaded Operators
//! \{
inline CpuFeatures& operator=(const CpuFeatures& other) noexcept = default;
inline bool operator==(const CpuFeatures& other) noexcept { return eq(other); }
inline bool operator!=(const CpuFeatures& other) noexcept { return !eq(other); }
//! \}
//! \name Accessors
//! \{
//! Returns true if there are no features set.
inline bool empty() const noexcept { return _data.empty(); }
//! Casts this base class into a derived type `T`.
template<typename T = Data>
inline T& data() noexcept { return static_cast<T&>(_data); }
//! Casts this base class into a derived type `T` (const).
template<typename T = Data>
inline const T& data() const noexcept { return static_cast<const T&>(_data); }
//! Returns CpuFeatures::Data as \ref CpuFeatures::X86.
inline X86& x86() noexcept { return data<X86>(); }
//! Returns CpuFeatures::Data as \ref CpuFeatures::X86 (const).
inline const X86& x86() const noexcept { return data<X86>(); }
//! Returns CpuFeatures::Data as \ref CpuFeatures::ARM.
inline ARM& arm() noexcept { return data<ARM>(); }
//! Returns CpuFeatures::Data as \ref CpuFeatures::ARM (const).
inline const ARM& arm() const noexcept { return data<ARM>(); }
//! Returns all features as array of bitwords (see \ref Support::BitWord).
inline BitWord* bits() noexcept { return _data.bits(); }
//! Returns all features as array of bitwords (const).
inline const BitWord* bits() const noexcept { return _data.bits(); }
//! Returns the number of BitWords returned by \ref bits().
inline size_t bitWordCount() const noexcept { return _data.bitWordCount(); }
//! Returns \ref Support::BitVectorIterator, that can be used to iterate over all features efficiently.
inline Iterator iterator() const noexcept { return _data.iterator(); }
//! Tests whether the feature `featureId` is present.
template<typename FeatureId>
inline bool has(const FeatureId& featureId) const noexcept { return _data.has(featureId); }
//! Tests whether all features as defined by `other` are present.
inline bool hasAll(const CpuFeatures& other) const noexcept { return _data.hasAll(other._data); }
//! \}
//! \name Manipulation
//! \{
inline void reset() noexcept { _data.reset(); }
//! Adds the given CPU `featureId` to the list of features.
template<typename... Args>
inline void add(Args&&... args) noexcept { return _data.add(std::forward<Args>(args)...); }
//! Adds the given CPU `featureId` to the list of features if `condition` is true.
template<typename... Args>
inline void addIf(bool condition, Args&&... args) noexcept { return _data.addIf(condition, std::forward<Args>(args)...); }
//! Removes the given CPU `featureId` from the list of features.
template<typename... Args>
inline void remove(Args&&... args) noexcept { return _data.remove(std::forward<Args>(args)...); }
//! Tests whether this set of CPU features matches `other`.
inline bool eq(const CpuFeatures& other) const noexcept { return _data.eq(other._data); }
//! \}
};
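The manipulation helpers above compose as follows - an illustrative sketch using only the members declared in this class (add(), addIf() and hasAll()):
#include <asmjit/core.h>
static bool hostMeetsRequirements(bool wantGfni) noexcept {
  asmjit::CpuFeatures required;
  required.add(asmjit::CpuFeatures::X86::kAVX2, asmjit::CpuFeatures::X86::kFMA);
  required.addIf(wantGfni, asmjit::CpuFeatures::X86::kGFNI);
  // hasAll() checks that every bit set in `required` is also set in the host features.
  return asmjit::CpuInfo::host().features().hasAll(required);
}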
//! CPU information.
class CpuInfo {
public:
//! \name Members
//! \{
//! Architecture.
uint8_t _arch;
Arch _arch;
//! Sub-architecture.
uint8_t _subArch;
SubArch _subArch;
//! True if the CPU was detected, false if the detection failed or it's not available.
bool _wasDetected;
//! Reserved for future use.
uint16_t _reserved;
uint8_t _reserved;
//! CPU family ID.
uint32_t _familyId;
//! CPU model ID.
@@ -69,7 +696,9 @@ public:
//! CPU brand string.
FixedString<64> _brand;
//! CPU features.
BaseFeatures _features;
CpuFeatures _features;
//! \}
//! \name Construction & Destruction
//! \{
@@ -83,10 +712,10 @@ public:
//! Returns the host CPU information.
ASMJIT_API static const CpuInfo& host() noexcept;
//! Initializes CpuInfo to the given architecture, see \ref Environment.
inline void initArch(uint32_t arch, uint32_t subArch = 0u) noexcept {
_arch = uint8_t(arch);
_subArch = uint8_t(subArch);
//! Initializes CpuInfo architecture and sub-architecture members to `arch` and `subArch`, respectively.
inline void initArch(Arch arch, SubArch subArch = SubArch::kUnknown) noexcept {
_arch = arch;
_subArch = subArch;
}
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
@@ -103,46 +732,76 @@ public:
//! \name Accessors
//! \{
//! Returns the CPU architecture id, see \ref Environment::Arch.
inline uint32_t arch() const noexcept { return _arch; }
//! Returns the CPU architecture sub-id, see \ref Environment::SubArch.
inline uint32_t subArch() const noexcept { return _subArch; }
//! Returns the CPU architecture this information relates to.
inline Arch arch() const noexcept { return _arch; }
//! Returns the CPU sub-architecture this information relates to.
inline SubArch subArch() const noexcept { return _subArch; }
//! Returns whether the CPU was detected successfully.
//!
//! If the returned value is false it means that AsmJit either failed to detect the CPU or it doesn't have an
//! implementation targeting the host architecture and operating system.
inline bool wasDetected() const noexcept { return _wasDetected; }
//! Returns the CPU family ID.
//!
//! Family identifier matches the FamilyId read by using CPUID on X86 architecture.
inline uint32_t familyId() const noexcept { return _familyId; }
//! Returns the CPU model ID.
//!
//! Model identifier matches the ModelId read by using CPUID on X86 architecture.
inline uint32_t modelId() const noexcept { return _modelId; }
//! Returns the CPU brand id.
//!
//! Brand identifier matches the BrandId read by using CPUID on X86 architecture.
inline uint32_t brandId() const noexcept { return _brandId; }
//! Returns the CPU stepping.
//!
//! Stepping matches the Stepping information read by using CPUID on X86 architecture.
inline uint32_t stepping() const noexcept { return _stepping; }
//! Returns the processor type.
//!
//! Processor type matches the ProcessorType read by using CPUID on X86 architecture.
inline uint32_t processorType() const noexcept { return _processorType; }
//! Returns the number of maximum logical processors.
//! Returns the maximum number of logical processors.
inline uint32_t maxLogicalProcessors() const noexcept { return _maxLogicalProcessors; }
//! Returns the size of a cache line flush.
inline uint32_t cacheLineSize() const noexcept { return _cacheLineSize; }
//! Returns the number of hardware threads available.
inline uint32_t hwThreadCount() const noexcept { return _hwThreadCount; }
//! Returns the CPU vendor.
//! Returns a CPU vendor string.
inline const char* vendor() const noexcept { return _vendor.str; }
//! Tests whether the CPU vendor is equal to `s`.
//! Tests whether the CPU vendor string is equal to `s`.
inline bool isVendor(const char* s) const noexcept { return _vendor.eq(s); }
//! Returns the CPU brand string.
//! Returns a CPU brand string.
inline const char* brand() const noexcept { return _brand.str; }
//! Returns all CPU features as `BaseFeatures`, cast to your arch-specific class
//! if needed.
template<typename T = BaseFeatures>
inline const T& features() const noexcept { return _features.as<T>(); }
//! Returns CPU features.
inline CpuFeatures& features() noexcept { return _features; }
//! Returns CPU features (const).
inline const CpuFeatures& features() const noexcept { return _features; }
//! Tests whether the CPU has the given `feature`.
inline bool hasFeature(uint32_t featureId) const noexcept { return _features.has(featureId); }
//! Adds the given CPU `feature` to the list of this CpuInfo features.
inline CpuInfo& addFeature(uint32_t featureId) noexcept { _features.add(featureId); return *this; }
template<typename FeatureId>
inline bool hasFeature(const FeatureId& featureId) const noexcept { return _features.has(featureId); }
//! Adds the given CPU `featureId` to the list of features.
template<typename... Args>
inline void addFeature(Args&&... args) noexcept { return _features.add(std::forward<Args>(args)...); }
//! Removes the given CPU `featureId` from the list of features.
template<typename... Args>
inline void removeFeature(Args&&... args) noexcept { return _features.remove(std::forward<Args>(args)...); }
//! \}
};
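A short sketch tying the CpuInfo accessors together (editorial illustration; Arch::kX64 is assumed to be the strong-typed architecture id introduced by this change):
#include <asmjit/core.h>
#include <cstdio>
static void printHostCpu() noexcept {
  const asmjit::CpuInfo& cpu = asmjit::CpuInfo::host();
  std::printf("vendor=%s brand=%s family=%u model=%u\n",
              cpu.vendor(), cpu.brand(), cpu.familyId(), cpu.modelId());
  if (cpu.arch() == asmjit::Arch::kX64 && cpu.hasFeature(asmjit::CpuFeatures::X86::kAVX512_FP16))
    std::printf("AVX512_FP16 is available\n");
}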

File diff suppressed because it is too large Load Diff

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
@@ -33,12 +15,11 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseEmitHelper - Formatting]
// ============================================================================
// BaseEmitHelper - Formatting
// ===========================
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
static void dumpFuncValue(String& sb, uint32_t arch, const FuncValue& value) noexcept {
static void dumpFuncValue(String& sb, Arch arch, const FuncValue& value) noexcept {
Formatter::formatTypeId(sb, value.typeId());
sb.append('@');
@@ -59,7 +40,7 @@ static void dumpFuncValue(String& sb, uint32_t arch, const FuncValue& value) noe
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
typedef FuncArgsContext::Var Var;
uint32_t arch = ctx.arch();
Arch arch = ctx.arch();
uint32_t varCount = ctx.varCount();
for (uint32_t i = 0; i < varCount; i++) {
@@ -80,9 +61,8 @@ static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
}
#endif
// ============================================================================
// [asmjit::BaseEmitHelper - EmitArgsAssignment]
// ============================================================================
// BaseEmitHelper - EmitArgsAssignment
// ===================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
typedef FuncArgsContext::Var Var;
@@ -95,7 +75,7 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
kWorkPostponed = 0x04
};
uint32_t arch = frame.arch();
Arch arch = frame.arch();
const ArchTraits& archTraits = ArchTraits::byArch(arch);
RAConstraints constraints;
@@ -112,11 +92,11 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
}
#endif
auto& workData = ctx._workData;
uint32_t varCount = ctx._varCount;
WorkData* workData = ctx._workData;
uint32_t saVarId = ctx._saVarId;
BaseReg sp = BaseReg::fromSignatureAndId(_emitter->_gpRegInfo.signature(), archTraits.spRegId());
BaseReg sp = BaseReg(_emitter->_gpSignature, archTraits.spRegId());
BaseReg sa = sp;
if (frame.hasDynamicAlignment()) {
@@ -126,10 +106,8 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
}
// --------------------------------------------------------------------------
// Register to stack and stack to stack moves must be first as now we have
// the biggest chance of having as many as possible unassigned registers.
// --------------------------------------------------------------------------
if (ctx._stackDstMask) {
// Base address of all arguments passed by stack.
@@ -163,33 +141,32 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
if (cur.isReg() && !cur.isIndirect()) {
WorkData& wd = workData[archTraits.regTypeToGroup(cur.regType())];
uint32_t rId = cur.regId();
uint32_t regId = cur.regId();
reg.setSignatureAndId(archTraits.regTypeToSignature(cur.regType()), rId);
wd.unassign(varId, rId);
reg.setSignatureAndId(archTraits.regTypeToSignature(cur.regType()), regId);
wd.unassign(varId, regId);
}
else {
// Stack to reg move - tricky since we move stack to stack we can decide which
// register to use. In general we follow the rule that IntToInt moves will use
// GP regs with possibility to signature or zero extend, and all other moves will
// either use GP or VEC regs depending on the size of the move.
RegInfo rInfo = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!rInfo.isValid()))
// Stack to reg move - tricky, but since this is a stack to stack move we can decide which register to use. In
// general we follow the rule that IntToInt moves will use GP regs with the possibility to sign or zero extend,
// and all other moves will either use GP or VEC regs depending on the size of the move.
OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid()))
return DebugUtils::errored(kErrorInvalidState);
WorkData& wd = workData[rInfo.group()];
uint32_t availableRegs = wd.availableRegs();
WorkData& wd = workData[signature.regGroup()];
RegMask availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs))
return DebugUtils::errored(kErrorInvalidState);
uint32_t rId = Support::ctz(availableRegs);
reg.setSignatureAndId(rInfo.signature(), rId);
uint32_t availableId = Support::ctz(availableRegs);
reg.setSignatureAndId(signature, availableId);
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
}
if (cur.isIndirect() && cur.isReg())
workData[BaseReg::kGroupGp].unassign(varId, cur.regId());
workData[RegGroup::kGp].unassign(varId, cur.regId());
// Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
@@ -197,10 +174,7 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
}
}
// --------------------------------------------------------------------------
// Shuffle all registers that are currently assigned accordingly to target
// assignment.
// --------------------------------------------------------------------------
// Shuffle all registers that are currently assigned accordingly to target assignment.
uint32_t workFlags = kWorkNone;
for (;;) {
@@ -212,8 +186,8 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
FuncValue& cur = var.cur;
FuncValue& out = var.out;
uint32_t curGroup = archTraits.regTypeToGroup(cur.regType());
uint32_t outGroup = archTraits.regTypeToGroup(out.regType());
RegGroup curGroup = archTraits.regTypeToGroup(cur.regType());
RegGroup outGroup = archTraits.regTypeToGroup(out.regType());
uint32_t curId = cur.regId();
uint32_t outId = out.regId();
@@ -228,8 +202,8 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
EmitMove:
ASMJIT_PROPAGATE(
emitArgMove(
BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(out.regType()), outId), out.typeId(),
BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
BaseReg(archTraits.regTypeToSignature(out.regType()), outId), out.typeId(),
BaseReg(archTraits.regTypeToSignature(cur.regType()), curId), cur.typeId()));
wd.reassign(varId, outId, curId);
cur.initReg(out.regType(), outId, out.typeId());
@@ -244,15 +218,15 @@ EmitMove:
if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
// Only few architectures provide swap operations, and only for few register groups.
if (archTraits.hasSwap(curGroup)) {
uint32_t highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween<uint32_t>(highestType, BaseReg::kTypeGp8Lo, BaseReg::kTypeGp16))
highestType = BaseReg::kTypeGp32;
if (archTraits.hasInstRegSwap(curGroup)) {
RegType highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16))
highestType = RegType::kGp32;
uint32_t signature = archTraits.regTypeToSignature(highestType);
OperandSignature signature = archTraits.regTypeToSignature(highestType);
ASMJIT_PROPAGATE(
emitRegSwap(BaseReg::fromSignatureAndId(signature, outId),
BaseReg::fromSignatureAndId(signature, curId)));
emitRegSwap(BaseReg(signature, outId), BaseReg(signature, curId)));
wd.swap(varId, curId, altId, outId);
cur.setRegId(outId);
var.markDone();
@@ -264,9 +238,9 @@ EmitMove:
}
else {
// If there is a scratch register it can be used to perform the swap.
uint32_t availableRegs = wd.availableRegs();
RegMask availableRegs = wd.availableRegs();
if (availableRegs) {
uint32_t inOutRegs = wd.dstRegs();
RegMask inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs)
availableRegs &= ~inOutRegs;
outId = Support::ctz(availableRegs);
@@ -294,10 +268,8 @@ EmitMove:
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
}
// --------------------------------------------------------------------------
// Load arguments passed by stack into registers. This is pretty simple and
// it never requires multiple iterations like the previous phase.
// --------------------------------------------------------------------------
if (ctx._hasStackSrc) {
uint32_t iterCount = 1;
@@ -317,12 +289,12 @@ EmitMove:
ASMJIT_ASSERT(var.out.isReg());
uint32_t outId = var.out.regId();
uint32_t outType = var.out.regType();
RegType outType = var.out.regType();
uint32_t group = archTraits.regTypeToGroup(outType);
WorkData& wd = ctx._workData[group];
RegGroup group = archTraits.regTypeToGroup(outType);
WorkData& wd = workData[group];
if (outId == sa.id() && group == BaseReg::kGroupGp) {
if (outId == sa.id() && group == RegGroup::kGp) {
// This register will be processed last as we still need `saRegId`.
if (iterCount == 1) {
iterCount++;
@@ -331,7 +303,7 @@ EmitMove:
wd.unassign(wd._physToVarId[outId], outId);
}
BaseReg dstReg = BaseReg::fromSignatureAndId(archTraits.regTypeToSignature(outType), outId);
BaseReg dstReg = BaseReg(archTraits.regTypeToSignature(outType), outId);
BaseMem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
ASMJIT_PROPAGATE(emitArgMove(

View File

@@ -1,26 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
#define ASMJIT_CORE_EMITHELPER_P_H_INCLUDED
@@ -35,10 +16,6 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::BaseEmitHelper]
// ============================================================================
//! Helper class that provides utilities for each supported architecture.
class BaseEmitHelper {
public:
@@ -50,12 +27,11 @@ public:
inline BaseEmitter* emitter() const noexcept { return _emitter; }
inline void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers or the same type or
//! between a register and its home slot. This function does not handle
//! register conversion.
//! Emits a pure move operation between two registers of the same type or between a register and its home
//! slot. This function does not handle register conversion.
virtual Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, uint32_t typeId, const char* comment = nullptr) = 0;
const Operand_& src_, TypeId typeId, const char* comment = nullptr) = 0;
//! Emits swap between two registers.
virtual Error emitRegSwap(
@@ -64,13 +40,12 @@ public:
//! Emits move from a function argument (either register or stack) to a register.
//!
//! This function can handle the necessary conversion from one argument to
//! another, and from one register type to another, if it's possible. Any
//! attempt of conversion that requires third register of a different group
//! This function can handle the necessary conversion from one argument to another, and from one register type
//! to another, if it's possible. Any attempt of conversion that requires a third register of a different group
//! (for example conversion from K to MMX on X86/X64) will fail.
virtual Error emitArgMove(
const BaseReg& dst_, uint32_t dstTypeId,
const Operand_& src_, uint32_t srcTypeId, const char* comment = nullptr) = 0;
const BaseReg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr) = 0;
Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
};

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/emitterutils_p.h"
@@ -39,91 +21,85 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::BaseEmitter - Construction / Destruction]
// ============================================================================
// BaseEmitter - Construction & Destruction
// ========================================
BaseEmitter::BaseEmitter(uint32_t emitterType) noexcept
: _emitterType(uint8_t(emitterType)) {}
BaseEmitter::BaseEmitter(EmitterType emitterType) noexcept
: _emitterType(emitterType) {}
BaseEmitter::~BaseEmitter() noexcept {
if (_code) {
_addEmitterFlags(kFlagDestroyed);
_addEmitterFlags(EmitterFlags::kDestroyed);
_code->detach(this);
}
}
// ============================================================================
// [asmjit::BaseEmitter - Finalize]
// ============================================================================
// BaseEmitter - Finalize
// ======================
Error BaseEmitter::finalize() {
// Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
return kErrorOk;
}
// ============================================================================
// [asmjit::BaseEmitter - Internals]
// ============================================================================
// BaseEmitter - Internals
// =======================
static constexpr uint32_t kEmitterPreservedFlags = BaseEmitter::kFlagOwnLogger | BaseEmitter::kFlagOwnErrorHandler;
static constexpr EmitterFlags kEmitterPreservedFlags = EmitterFlags::kOwnLogger | EmitterFlags::kOwnErrorHandler;
static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
bool emitComments = false;
bool hasValidationOptions = false;
bool hasDiagnosticOptions = false;
if (self->emitterType() == BaseEmitter::kTypeAssembler) {
if (self->emitterType() == EmitterType::kAssembler) {
// Assembler: Don't emit comments if logger is not attached.
emitComments = self->_code != nullptr && self->_logger != nullptr;
hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionAssembler);
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateAssembler);
}
else {
// Builder/Compiler: Always emit comments, we cannot assume they won't be used.
emitComments = self->_code != nullptr;
hasValidationOptions = self->hasValidationOption(BaseEmitter::kValidationOptionIntermediate);
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate);
}
if (emitComments)
self->_addEmitterFlags(BaseEmitter::kFlagLogComments);
self->_addEmitterFlags(EmitterFlags::kLogComments);
else
self->_clearEmitterFlags(BaseEmitter::kFlagLogComments);
self->_clearEmitterFlags(EmitterFlags::kLogComments);
// The reserved option tells emitter (Assembler/Builder/Compiler) that there
// may be either a border case (CodeHolder not attached, for example) or that
// logging or validation is required.
if (self->_code == nullptr || self->_logger || hasValidationOptions)
self->_forcedInstOptions |= BaseInst::kOptionReserved;
// The reserved option tells the emitter (Assembler/Builder/Compiler) that there may be either a border
// case (CodeHolder not attached, for example) or that logging or validation is required.
if (self->_code == nullptr || self->_logger || hasDiagnosticOptions)
self->_forcedInstOptions |= InstOptions::kReserved;
else
self->_forcedInstOptions &= ~BaseInst::kOptionReserved;
self->_forcedInstOptions &= ~InstOptions::kReserved;
}
// ============================================================================
// [asmjit::BaseEmitter - Validation Options]
// ============================================================================
// BaseEmitter - Diagnostic Options
// ================================
void BaseEmitter::addValidationOptions(uint32_t options) noexcept {
_validationOptions = uint8_t(_validationOptions | options);
void BaseEmitter::addDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions |= options;
BaseEmitter_updateForcedOptions(this);
}
void BaseEmitter::clearValidationOptions(uint32_t options) noexcept {
_validationOptions = uint8_t(_validationOptions | options);
void BaseEmitter::clearDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions &= ~options;
BaseEmitter_updateForcedOptions(this);
}
// ============================================================================
// [asmjit::BaseEmitter - Logging]
// ============================================================================
// BaseEmitter - Logging
// =====================
void BaseEmitter::setLogger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
if (logger) {
_logger = logger;
_addEmitterFlags(kFlagOwnLogger);
_addEmitterFlags(EmitterFlags::kOwnLogger);
}
else {
_logger = nullptr;
_clearEmitterFlags(kFlagOwnLogger);
_clearEmitterFlags(EmitterFlags::kOwnLogger);
if (_code)
_logger = _code->logger();
}
@@ -133,18 +109,17 @@ void BaseEmitter::setLogger(Logger* logger) noexcept {
#endif
}
// ============================================================================
// [asmjit::BaseEmitter - Error Handling]
// ============================================================================
// BaseEmitter - Error Handling
// ============================
void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
if (errorHandler) {
_errorHandler = errorHandler;
_addEmitterFlags(kFlagOwnErrorHandler);
_addEmitterFlags(EmitterFlags::kOwnErrorHandler);
}
else {
_errorHandler = nullptr;
_clearEmitterFlags(kFlagOwnErrorHandler);
_clearEmitterFlags(EmitterFlags::kOwnErrorHandler);
if (_code)
_errorHandler = _code->errorHandler();
}
@@ -160,58 +135,55 @@ Error BaseEmitter::reportError(Error err, const char* message) {
return err;
}
// ============================================================================
// [asmjit::BaseEmitter - Labels]
// ============================================================================
// BaseEmitter - Labels
// ====================
Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : uint32_t(Globals::kInvalidId));
return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : Globals::kInvalidId);
}
bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
return _code && labelId < _code->labelCount();
}
// ============================================================================
// [asmjit::BaseEmitter - Emit (Low-Level)]
// ============================================================================
// BaseEmitter - Emit (Low-Level)
// ==============================
using EmitterUtils::noExt;
Error BaseEmitter::_emitI(uint32_t instId) {
Error BaseEmitter::_emitI(InstId instId) {
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0) {
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0) {
return _emit(instId, o0, noExt[1], noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1) {
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1) {
return _emit(instId, o0, o1, noExt[2], noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
return _emit(instId, o0, o1, o2, noExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Operand_ opExt[3] = { o3 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Operand_ opExt[3] = { o3, o4 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Operand_ opExt[3] = { o3, o4, o5 };
return _emit(instId, o0, o1, o2, opExt);
}
Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
Error BaseEmitter::_emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
const Operand_* op = operands;
Operand_ opExt[3];
switch (opCount) {
@@ -247,9 +219,8 @@ Error BaseEmitter::_emitOpArray(uint32_t instId, const Operand_* operands, size_
}
}
// ============================================================================
// [asmjit::BaseEmitter - Emit (High-Level)]
// ============================================================================
// BaseEmitter - Emit (High-Level)
// ===============================
ASMJIT_FAVOR_SIZE Error BaseEmitter::emitProlog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
@@ -314,13 +285,12 @@ ASMJIT_FAVOR_SIZE Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame,
return DebugUtils::errored(kErrorInvalidArch);
}
// ============================================================================
// [asmjit::BaseEmitter - Comment]
// ============================================================================
// BaseEmitter - Comment
// =====================
Error BaseEmitter::commentf(const char* fmt, ...) {
if (!hasEmitterFlag(kFlagLogComments)) {
if (!hasEmitterFlag(kFlagAttached))
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
@@ -342,8 +312,8 @@ Error BaseEmitter::commentf(const char* fmt, ...) {
}
Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (!hasEmitterFlag(kFlagLogComments)) {
if (!hasEmitterFlag(kFlagAttached))
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached))
return reportError(DebugUtils::errored(kErrorNotInitialized));
return kErrorOk;
}
@@ -360,18 +330,17 @@ Error BaseEmitter::commentv(const char* fmt, va_list ap) {
#endif
}
// ============================================================================
// [asmjit::BaseEmitter - Events]
// ============================================================================
// BaseEmitter - Events
// ====================
Error BaseEmitter::onAttach(CodeHolder* code) noexcept {
_code = code;
_environment = code->environment();
_addEmitterFlags(kFlagAttached);
_addEmitterFlags(EmitterFlags::kAttached);
const ArchTraits& archTraits = ArchTraits::byArch(code->arch());
uint32_t nativeRegType = Environment::is32Bit(code->arch()) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
_gpRegInfo.setSignature(archTraits._regInfo[nativeRegType].signature());
RegType nativeRegType = Environment::is32Bit(code->arch()) ? RegType::kGp32 : RegType::kGp64;
_gpSignature = archTraits.regTypeToSignature(nativeRegType);
onSettingsUpdated();
return kErrorOk;
@@ -387,13 +356,13 @@ Error BaseEmitter::onDetach(CodeHolder* code) noexcept {
_errorHandler = nullptr;
_clearEmitterFlags(~kEmitterPreservedFlags);
_forcedInstOptions = BaseInst::kOptionReserved;
_forcedInstOptions = InstOptions::kReserved;
_privateData = 0;
_environment.reset();
_gpRegInfo.reset();
_gpSignature.reset();
_instOptions = 0;
_instOptions = InstOptions::kNone;
_extraReg.reset();
_inlineComment = nullptr;

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITTER_H_INCLUDED
#define ASMJIT_CORE_EMITTER_H_INCLUDED
@@ -35,41 +17,203 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class ConstPool;
class FuncFrame;
class FuncArgsAssignment;
// ============================================================================
// [asmjit::BaseEmitter]
// ============================================================================
//! Align mode, used by \ref BaseEmitter::align().
enum class AlignMode : uint8_t {
//! Align executable code.
kCode = 0,
//! Align non-executable code.
kData = 1,
//! Align by a sequence of zeros.
kZero = 2,
//! Provides a base foundation to emit code - specialized by `Assembler` and
//! `BaseBuilder`.
//! Maximum value of `AlignMode`.
kMaxValue = kZero
};
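A hedged sketch of how AlignMode is consumed (assuming the existing BaseEmitter::align(AlignMode, uint32_t) overload, which this change retypes rather than introduces):
#include <asmjit/core.h>
static asmjit::Error alignHotLoop(asmjit::BaseEmitter& emitter) noexcept {
  // Aligns the current position of the code stream to a 16-byte boundary.
  return emitter.align(asmjit::AlignMode::kCode, 16);
}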
//! Emitter type used by \ref BaseEmitter.
enum class EmitterType : uint8_t {
//! Unknown or uninitialized.
kNone = 0,
//! Emitter inherits from \ref BaseAssembler.
kAssembler = 1,
//! Emitter inherits from \ref BaseBuilder.
kBuilder = 2,
//! Emitter inherits from \ref BaseCompiler.
kCompiler = 3,
//! Maximum value of `EmitterType`.
kMaxValue = kCompiler
};
//! Emitter flags, used by \ref BaseEmitter.
enum class EmitterFlags : uint8_t {
//! No flags.
kNone = 0u,
//! Emitter is attached to CodeHolder.
kAttached = 0x01u,
//! The emitter must emit comments.
kLogComments = 0x08u,
//! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
kOwnLogger = 0x10u,
//! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
kOwnErrorHandler = 0x20u,
//! The emitter was finalized.
kFinalized = 0x40u,
//! The emitter was destroyed.
//!
//! This flag is used for a very short time when an emitter is being destroyed by
//! CodeHolder.
kDestroyed = 0x80u
};
ASMJIT_DEFINE_ENUM_FLAGS(EmitterFlags)
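ASMJIT_DEFINE_ENUM_FLAGS provides the usual bitwise operators for the strong-typed flags, so masks can be built and tested without casts - a tiny illustration:
#include <asmjit/core.h>
static bool ownsLoggerAndErrorHandler(asmjit::EmitterFlags flags) noexcept {
  const asmjit::EmitterFlags kOwnAll =
    asmjit::EmitterFlags::kOwnLogger | asmjit::EmitterFlags::kOwnErrorHandler;
  return (flags & kOwnAll) == kOwnAll;
}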
//! Encoding options.
enum class EncodingOptions : uint32_t {
//! No encoding options.
kNone = 0,
//! Emit instructions that are optimized for size, if possible.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! When this option is set, the assembler will try, where possible, to rewrite instructions into operation-equivalent
//! instructions that take fewer bytes by taking advantage of implicit zero extension. For example, instructions
//! like `mov r64, imm` and `and r64, imm` can be translated to `mov r32, imm` and `and r32, imm` when the
//! immediate constant is less than `2^31`.
kOptimizeForSize = 0x00000001u,
//! Emit optimized code-alignment sequences.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! The default align sequence used by X86 architecture is a one-byte (0x90) opcode that is often shown by
//! disassemblers as NOP. However, there are more optimized align sequences for 2-11 bytes that may execute faster
//! on certain CPUs. If this feature is enabled AsmJit will generate specialized sequences for alignment of 2 to 11 bytes.
kOptimizedAlign = 0x00000002u,
//! Emit jump-prediction hints.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! Jump prediction is usually based on the direction of the jump. If the jump is backward it is usually predicted
//! as taken, and if the jump is forward it is usually predicted as not-taken. The reason is that loops generally
//! use backward jumps and conditions usually use forward jumps. However, this behavior can be overridden by using
//! instruction prefixes. If this option is enabled these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that used to take prediction hints into
//! consideration was P4. Newer processors implement heuristics for branch prediction and ignore static hints.
//! This means that this feature can only be used for annotation purposes.
kPredictedJumps = 0x00000010u
};
ASMJIT_DEFINE_ENUM_FLAGS(EncodingOptions)
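A usage sketch (assuming BaseEmitter::addEncodingOptions(), which mirrors the addDiagnosticOptions() setter shown in emitter.cpp above):
#include <asmjit/core.h>
static void tuneForSize(asmjit::BaseEmitter& emitter) noexcept {
  emitter.addEncodingOptions(asmjit::EncodingOptions::kOptimizeForSize |
                             asmjit::EncodingOptions::kOptimizedAlign);
}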
//! Diagnostic options are used to tell emitters and their passes to perform diagnostics when emitting or processing
//! user code. These options control validation and extra diagnostics that can be performed by higher level emitters.
//!
//! Instruction Validation
//! ----------------------
//!
//! \ref BaseAssembler implementations perform by default only basic checks that are necessary to identify all
//! variations of an instruction so the correct encoding can be selected. This is fine for production-ready code
//! as the assembler doesn't have to perform checks that would slow it down. However, sometimes these checks are
//! beneficial especially when the project that uses AsmJit is in a development phase, in which mistakes happen
//! often. To make the experience of using AsmJit seamless it offers validation features that can be controlled
//! by \ref DiagnosticOptions.
//!
//! Compiler Diagnostics
//! --------------------
//!
//! Diagnostic options work with \ref BaseCompiler passes (precisely with its register allocation pass). These options
//! can be used to enable logging of all operations that the Compiler does.
enum class DiagnosticOptions : uint32_t {
//! No diagnostic options.
kNone = 0,
//! Perform strict validation in \ref BaseAssembler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before it's encoded into a binary representation. This flag
//! is only relevant for \ref BaseAssembler implementations, but can be set in any other emitter type, in that case
//! if that emitter needs to create an assembler on its own, for the purpose of \ref BaseEmitter::finalize() it
//! would propagate this flag to such assembler so all instructions passed to it are explicitly validated.
//!
//! Default: false.
kValidateAssembler = 0x00000001u,
//! Perform strict validation in \ref BaseBuilder::emit() and \ref BaseCompiler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before an \ref InstNode representing the instruction is
//! created by \ref BaseBuilder or \ref BaseCompiler. This option can be more useful than \ref kValidateAssembler
//! in cases in which an invalid instruction reaches the assembler, but became invalid much earlier, most likely
//! when it was passed to the Builder/Compiler.
//!
//! This is a separate option that was introduced, because it's possible to manipulate the instruction stream
//! emitted by \ref BaseBuilder and \ref BaseCompiler - this means that it's allowed to emit invalid instructions
//! (for example with missing operands) that will be fixed later before finalizing it.
//!
//! Default: false.
kValidateIntermediate = 0x00000002u,
//! Annotate all nodes processed by register allocator (Compiler/RA).
//!
//! \note Annotations don't need debug options; however, some debug options like `kRADebugLiveness` may influence
//! their output (for example the mentioned option would add liveness information to per-instruction annotation).
kRAAnnotate = 0x00000080u,
//! Debug CFG generation and other related algorithms / operations (Compiler/RA).
kRADebugCFG = 0x00000100u,
//! Debug liveness analysis (Compiler/RA).
kRADebugLiveness = 0x00000200u,
//! Debug register allocation assignment (Compiler/RA).
kRADebugAssignment = 0x00000400u,
//! Debug the removal of code part of unreachable blocks.
kRADebugUnreachable = 0x00000800u,
//! Enable all debug options (Compiler/RA).
kRADebugAll = 0x0000FF00u,
};
ASMJIT_DEFINE_ENUM_FLAGS(DiagnosticOptions)
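A sketch of enabling validation during development using BaseEmitter::addDiagnosticOptions(), the setter introduced in emitter.cpp above:
#include <asmjit/core.h>
static void enableStrictValidation(asmjit::BaseEmitter& emitter) noexcept {
  // The assembler validates at emit() time; Builder/Compiler validate when nodes are created.
  emitter.addDiagnosticOptions(asmjit::DiagnosticOptions::kValidateAssembler |
                               asmjit::DiagnosticOptions::kValidateIntermediate);
}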
//! Provides a base foundation for emitting code - specialized by \ref BaseAssembler and \ref BaseBuilder.
class ASMJIT_VIRTAPI BaseEmitter {
public:
ASMJIT_BASE_CLASS(BaseEmitter)
//! See \ref EmitterType.
uint8_t _emitterType = 0;
//! See \ref BaseEmitter::EmitterFlags.
uint8_t _emitterFlags = 0;
//! Validation flags in case validation is used, see \ref InstAPI::ValidationFlags.
//!
//! \note Validation flags are specific to the emitter and they are setup at
//! construction time and then never changed.
uint8_t _validationFlags = 0;
//! Validation options, see \ref ValidationOptions.
uint8_t _validationOptions = 0;
//! \name Members
//! \{
//! Encoding options, see \ref EncodingOptions.
uint32_t _encodingOptions = 0;
//! See \ref EmitterType.
EmitterType _emitterType = EmitterType::kNone;
//! See \ref EmitterFlags.
EmitterFlags _emitterFlags = EmitterFlags::kNone;
//! Validation flags in case validation is used.
//!
//! \note Validation flags are specific to the emitter and they are set up at construction time and then never
//! changed.
ValidationFlags _validationFlags = ValidationFlags::kNone;
//! Validation options.
DiagnosticOptions _diagnosticOptions = DiagnosticOptions::kNone;
//! Encoding options.
EncodingOptions _encodingOptions = EncodingOptions::kNone;
//! Forced instruction options, combined with \ref _instOptions by \ref emit().
uint32_t _forcedInstOptions = BaseInst::kOptionReserved;
InstOptions _forcedInstOptions = InstOptions::kReserved;
//! Internal private data used freely by any emitter.
uint32_t _privateData = 0;
@@ -83,143 +227,21 @@ public:
//! Describes the target environment, matches \ref CodeHolder::environment().
Environment _environment {};
//! Native GP register signature and signature related information.
RegInfo _gpRegInfo {};
OperandSignature _gpSignature {};
//! Next instruction options (affects the next instruction).
uint32_t _instOptions = 0;
InstOptions _instOptions = InstOptions::kNone;
//! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
RegOnly _extraReg {};
//! Inline comment of the next instruction (affects the next instruction).
const char* _inlineComment = nullptr;
//! Emitter type.
enum EmitterType : uint32_t {
//! Unknown or uninitialized.
kTypeNone = 0,
//! Emitter inherits from \ref BaseAssembler.
kTypeAssembler = 1,
//! Emitter inherits from \ref BaseBuilder.
kTypeBuilder = 2,
//! Emitter inherits from \ref BaseCompiler.
kTypeCompiler = 3,
//! Count of emitter types.
kTypeCount = 4
};
//! Emitter flags.
enum EmitterFlags : uint32_t {
//! Emitter is attached to CodeHolder.
kFlagAttached = 0x01u,
//! The emitter must emit comments.
kFlagLogComments = 0x08u,
//! The emitter has its own \ref Logger (not propagated from \ref CodeHolder).
kFlagOwnLogger = 0x10u,
//! The emitter has its own \ref ErrorHandler (not propagated from \ref CodeHolder).
kFlagOwnErrorHandler = 0x20u,
//! The emitter was finalized.
kFlagFinalized = 0x40u,
//! The emitter was destroyed.
kFlagDestroyed = 0x80u
};
//! Encoding options.
enum EncodingOptions : uint32_t {
//! Emit instructions that are optimized for size, if possible.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! When this option is set, the assembler will try, when possible, to rewrite instructions
//! into operation-equivalent instructions that take fewer bytes by taking
//! advantage of implicit zero extension. For example, instructions
//! like `mov r64, imm` and `and r64, imm` can be translated to `mov r32, imm`
//! and `and r32, imm` when the immediate constant is less than `2^31`.
kEncodingOptionOptimizeForSize = 0x00000001u,
//! Emit optimized code-alignment sequences.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! Default align sequence used by X86 architecture is one-byte (0x90)
//! opcode that is often shown by disassemblers as NOP. However there are
//! more optimized align sequences for 2-11 bytes that may execute faster
//! on certain CPUs. If this feature is enabled AsmJit will generate
//! specialized sequences for alignment between 2 to 11 bytes.
kEncodingOptionOptimizedAlign = 0x00000002u,
//! Emit jump-prediction hints.
//!
//! Default: false.
//!
//! X86 Specific
//! ------------
//!
//! Jump prediction is usually based on the direction of the jump. If the
//! jump is backward it is usually predicted as taken; and if the jump is
//! forward it is usually predicted as not-taken. The reason is that loops
//! generally use backward jumps and conditions usually use forward jumps.
//! However this behavior can be overridden by using instruction prefixes.
//! If this option is enabled these hints will be emitted.
//!
//! This feature is disabled by default, because the only processor that
//! used to take into consideration prediction hints was P4. Newer processors
//! implement heuristics for branch prediction and ignore static hints. This
//! means that this feature can be only used for annotation purposes.
kEncodingOptionPredictedJumps = 0x00000010u
};
#ifndef ASMJIT_NO_DEPRECATED
enum EmitterOptions : uint32_t {
kOptionOptimizedForSize = kEncodingOptionOptimizeForSize,
kOptionOptimizedAlign = kEncodingOptionOptimizedAlign,
kOptionPredictedJumps = kEncodingOptionPredictedJumps
};
#endif
//! Validation options are used to tell emitters to perform strict validation
//! of instructions passed to \ref emit().
//!
//! \ref BaseAssembler implementations perform by default only basic checks
//! that are necessary to identify all variations of an instruction so the
//! correct encoding can be selected. This is fine for production-ready code
//! as the assembler doesn't have to perform checks that would slow it down.
//! However, sometimes these checks are beneficial especially when the project
//! that uses AsmJit is in a development phase, in which mistakes happen often.
//! To make the experience of using AsmJit seamless it offers validation
//! features that can be controlled by `ValidationOptions`.
enum ValidationOptions : uint32_t {
//! Perform strict validation in \ref BaseAssembler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before it's encoded
//! into a binary representation. This flag is only relevant for \ref
//! BaseAssembler implementations, but can be set in any other emitter type,
//! in that case if that emitter needs to create an assembler on its own,
//! for the purpose of \ref finalize() it would propagate this flag to such
//! assembler so all instructions passed to it are explicitly validated.
//!
//! Default: false.
kValidationOptionAssembler = 0x00000001u,
//! Perform strict validation in \ref BaseBuilder::emit() and \ref
//! BaseCompiler::emit() implementations.
//!
//! This flag ensures that each instruction is checked before an \ref
//! InstNode representing the instruction is created by Builder or Compiler.
//!
//! Default: false.
kValidationOptionIntermediate = 0x00000002u
};
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit BaseEmitter(uint32_t emitterType) noexcept;
ASMJIT_API explicit BaseEmitter(EmitterType emitterType) noexcept;
ASMJIT_API virtual ~BaseEmitter() noexcept;
//! \}
@@ -239,28 +261,28 @@ public:
//! \{
//! Returns the type of this emitter, see `EmitterType`.
inline uint32_t emitterType() const noexcept { return _emitterType; }
inline EmitterType emitterType() const noexcept { return _emitterType; }
//! Returns emitter flags, see `EmitterFlags`.
inline uint32_t emitterFlags() const noexcept { return _emitterFlags; }
inline EmitterFlags emitterFlags() const noexcept { return _emitterFlags; }
//! Tests whether the emitter inherits from `BaseAssembler`.
inline bool isAssembler() const noexcept { return _emitterType == kTypeAssembler; }
inline bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`.
//!
//! \note Both Builder and Compiler emitters would return `true`.
inline bool isBuilder() const noexcept { return _emitterType >= kTypeBuilder; }
inline bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); }
//! Tests whether the emitter inherits from `BaseCompiler`.
inline bool isCompiler() const noexcept { return _emitterType == kTypeCompiler; }
inline bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; }
//! Tests whether the emitter has the given `flag` enabled.
inline bool hasEmitterFlag(uint32_t flag) const noexcept { return (_emitterFlags & flag) != 0; }
inline bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); }
//! Tests whether the emitter is finalized.
inline bool isFinalized() const noexcept { return hasEmitterFlag(kFlagFinalized); }
inline bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction).
inline bool isDestroyed() const noexcept { return hasEmitterFlag(kFlagDestroyed); }
inline bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); }
inline void _addEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags | flags); }
inline void _clearEmitterFlags(uint32_t flags) noexcept { _emitterFlags = uint8_t(_emitterFlags & ~flags); }
inline void _addEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags |= flags; }
inline void _clearEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags &= _emitterFlags & ~flags; }
//! \}
@@ -270,7 +292,7 @@ public:
//! Returns the CodeHolder this emitter is attached to.
inline CodeHolder* code() const noexcept { return _code; }
//! Returns the target environment, see \ref Environment.
//! Returns the target environment.
//!
//! The returned \ref Environment reference matches \ref CodeHolder::environment().
inline const Environment& environment() const noexcept { return _environment; }
@@ -281,9 +303,9 @@ public:
inline bool is64Bit() const noexcept { return environment().is64Bit(); }
//! Returns the target architecture type.
inline uint32_t arch() const noexcept { return environment().arch(); }
inline Arch arch() const noexcept { return environment().arch(); }
//! Returns the target architecture sub-type.
inline uint32_t subArch() const noexcept { return environment().subArch(); }
inline SubArch subArch() const noexcept { return environment().subArch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes).
inline uint32_t registerSize() const noexcept { return environment().registerSize(); }
@@ -298,12 +320,10 @@ public:
//! Finalizes this emitter.
//!
//! Materializes the content of the emitter by serializing it to the attached
//! \ref CodeHolder through an architecture specific \ref BaseAssembler. This
//! function won't do anything if the emitter inherits from \ref BaseAssembler
//! as assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder.
//! However, if this is an emitter that inherits from \ref BaseBuilder or \ref
//! BaseCompiler then these emitters need the materialization phase as they
//! Materializes the content of the emitter by serializing it to the attached \ref CodeHolder through an architecture
//! specific \ref BaseAssembler. This function won't do anything if the emitter inherits from \ref BaseAssembler as
//! assemblers emit directly to a \ref CodeBuffer held by \ref CodeHolder. However, if this is an emitter that
//! inherits from \ref BaseBuilder or \ref BaseCompiler then these emitters need the materialization phase as they
//! store their content in a representation not visible to \ref CodeHolder.
ASMJIT_API virtual Error finalize();
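A minimal sketch (not part of the diff) of the materialization step described above, assuming an already initialized CodeHolder; the function name is hypothetical:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: a Builder only records InstNodes; finalize() serializes them into
// `code` through an internal architecture-specific assembler.
static Error buildAndFinalize(CodeHolder& code) {   // `code` must already be initialized.
  x86::Builder cb(&code);
  cb.mov(x86::eax, 1);
  cb.ret();
  return cb.finalize();
}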
@@ -317,29 +337,27 @@ public:
//! Tests whether the emitter has its own logger.
//!
//! Own logger means that it overrides the possible logger that may be used
//! by \ref CodeHolder this emitter is attached to.
inline bool hasOwnLogger() const noexcept { return hasEmitterFlag(kFlagOwnLogger); }
//! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is
//! attached to.
inline bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); }
//! Returns the logger this emitter uses.
//!
//! The returned logger is either the emitter's own logger or it's logger
//! used by \ref CodeHolder this emitter is attached to.
//! The returned logger is either the emitter's own logger or the logger used by \ref CodeHolder this emitter
//! is attached to.
inline Logger* logger() const noexcept { return _logger; }
//! Sets or resets the logger of the emitter.
//!
//! If the `logger` argument is non-null then the logger will be considered
//! emitter's own logger, see \ref hasOwnLogger() for more details. If the
//! given `logger` is null then the emitter will automatically use logger
//! If the `logger` argument is non-null then the logger will be considered emitter's own logger, see \ref
//! hasOwnLogger() for more details. If the given `logger` is null then the emitter will automatically use logger
//! that is attached to the \ref CodeHolder this emitter is attached to.
ASMJIT_API void setLogger(Logger* logger) noexcept;
//! Resets the logger of this emitter.
//!
//! The emitter will bail to using a logger attached to \ref CodeHolder this
//! emitter is attached to, or no logger at all if \ref CodeHolder doesn't
//! have one.
//! The emitter will fall back to using the logger attached to the \ref CodeHolder this emitter is attached to, or
//! no logger at all if the \ref CodeHolder doesn't have one.
inline void resetLogger() noexcept { return setLogger(nullptr); }
//! \}
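A short sketch (not part of the diff) of the own-logger vs. CodeHolder-logger behavior described above; the helper name is hypothetical:

#include <asmjit/core.h>
using namespace asmjit;

// Hypothetical sketch: a logger set on CodeHolder is shared by attached emitters,
// while a logger set on the emitter itself overrides it.
static void attachLoggers(CodeHolder& code, BaseEmitter& emitter, FileLogger& shared, FileLogger& own) {
  code.setLogger(&shared);   // Used by emitters that don't have their own logger.
  emitter.setLogger(&own);   // hasOwnLogger() is now true for this emitter.
  emitter.resetLogger();     // Falls back to the CodeHolder's logger again.
}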
@@ -352,14 +370,14 @@ public:
//! Tests whether the emitter has its own error handler.
//!
//! Own error handler means that it overrides the possible error handler that
//! may be used by \ref CodeHolder this emitter is attached to.
inline bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(kFlagOwnErrorHandler); }
//! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this
//! emitter is attached to.
inline bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); }
//! Returns the error handler this emitter uses.
//!
//! The returned error handler is either the emitter's own error handler or
//! it's error handler used by \ref CodeHolder this emitter is attached to.
//! The returned error handler is either the emitter's own error handler or the error handler used by
//! \ref CodeHolder this emitter is attached to.
inline ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
//! Sets or resets the error handler of the emitter.
@@ -369,11 +387,9 @@ public:
inline void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
//! Handles the given error in the following way:
//! 1. If the emitter has \ref ErrorHandler attached, it calls its
//! \ref ErrorHandler::handleError() member function first, and
//! then returns the error. The `handleError()` function may throw.
//! 2. if the emitter doesn't have \ref ErrorHandler, the error is
//! simply returned.
//! 1. If the emitter has \ref ErrorHandler attached, it calls its \ref ErrorHandler::handleError() member function
//! first, and then returns the error. The `handleError()` function may throw.
//! 2. If the emitter doesn't have \ref ErrorHandler, the error is simply returned.
ASMJIT_API Error reportError(Error err, const char* message = nullptr);
//! \}
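A minimal sketch (not part of the diff) of an ErrorHandler that works with reportError() as described above; the class name is made up:

#include <asmjit/core.h>
#include <cstdio>
using namespace asmjit;

// Hypothetical error handler: prints every error reported by an emitter.
class PrintingErrorHandler : public ErrorHandler {
public:
  void handleError(Error err, const char* message, BaseEmitter* origin) override {
    (void)origin;
    std::printf("AsmJit error %u: %s\n", unsigned(err), message);
  }
};

An instance would typically be installed via setErrorHandler() on either the emitter or the CodeHolder.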
@@ -381,61 +397,51 @@ public:
//! \name Encoding Options
//! \{
//! Returns encoding options, see \ref EncodingOptions.
inline uint32_t encodingOptions() const noexcept { return _encodingOptions; }
//! Returns encoding options.
inline EncodingOptions encodingOptions() const noexcept { return _encodingOptions; }
//! Tests whether the encoding `option` is set.
inline bool hasEncodingOption(uint32_t option) const noexcept { return (_encodingOptions & option) != 0; }
inline bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); }
//! Enables the given encoding `options`, see \ref EncodingOptions.
inline void addEncodingOptions(uint32_t options) noexcept { _encodingOptions |= options; }
//! Disables the given encoding `options`, see \ref EncodingOptions.
inline void clearEncodingOptions(uint32_t options) noexcept { _encodingOptions &= ~options; }
//! Enables the given encoding `options`.
inline void addEncodingOptions(EncodingOptions options) noexcept { _encodingOptions |= options; }
//! Disables the given encoding `options`.
inline void clearEncodingOptions(EncodingOptions options) noexcept { _encodingOptions &= ~options; }
//! \}
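A short sketch (not part of the diff) of these accessors; it assumes the refactored enumerator is named EncodingOptions::kOptimizeForSize, following the renaming pattern used elsewhere in this commit:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: prefer smaller encodings (e.g. `mov r64, imm` emitted as `mov r32, imm`
// when the immediate fits), assuming EncodingOptions::kOptimizeForSize exists post-refactor.
static void preferSmallCode(x86::Assembler& a) {
  a.addEncodingOptions(EncodingOptions::kOptimizeForSize);
  // ... emit code ...
  a.clearEncodingOptions(EncodingOptions::kOptimizeForSize);
}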
//! \name Validation Options
//! \name Diagnostic Options
//! \{
//! Returns the emitter's validation options, see \ref ValidationOptions.
inline uint32_t validationOptions() const noexcept {
return _validationOptions;
}
//! Returns the emitter's diagnostic options.
inline DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; }
//! Tests whether the given `option` is present in validation options.
inline bool hasValidationOption(uint32_t option) const noexcept {
return (_validationOptions & option) != 0;
}
//! Tests whether the given `option` is present in the emitter's diagnostic options.
inline bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Activates the given validation `options`, see \ref ValidationOptions.
//! Activates the given diagnostic `options`.
//!
//! This function is used to activate explicit validation options that will
//! be then used by all emitter implementations. There are in general two
//! possibilities:
//! This function is used to activate explicit validation options that will then be used by all emitter
//! implementations. In general, there are two possibilities:
//!
//! - Architecture specific assembler is used. In this case a
//! \ref kValidationOptionAssembler can be used to turn on explicit
//! validation that will be used before an instruction is emitted.
//! This means that internally an extra step will be performed to
//! make sure that the instruction is correct. This is needed, because
//! by default assemblers prefer speed over strictness.
//! - Architecture specific assembler is used. In this case a \ref DiagnosticOptions::kValidateAssembler can be
//! used to turn on explicit validation that will be used before an instruction is emitted. This means that
//! internally an extra step will be performed to make sure that the instruction is correct. This is needed,
//! because by default assemblers prefer speed over strictness.
//!
//! This option should be used in debug builds as it's pretty expensive.
//!
//! - Architecture specific builder or compiler is used. In this case
//! the user can turn on \ref kValidationOptionIntermediate option
//! that adds explicit validation step before the Builder or Compiler
//! creates an \ref InstNode to represent an emitted instruction. Error
//! will be returned if the instruction is ill-formed. In addition,
//! also \ref kValidationOptionAssembler can be used, which would not be
//! consumed by Builder / Compiler directly, but it would be propagated
//! to an architecture specific \ref BaseAssembler implementation it
//! creates during \ref BaseEmitter::finalize().
ASMJIT_API void addValidationOptions(uint32_t options) noexcept;
//! - Architecture specific builder or compiler is used. In this case the user can turn on
//! \ref DiagnosticOptions::kValidateIntermediate option that adds explicit validation step before the Builder
//! or Compiler creates an \ref InstNode to represent an emitted instruction. Error will be returned if the
//! instruction is ill-formed. In addition, also \ref DiagnosticOptions::kValidateAssembler can be used, which
//! would not be consumed by Builder / Compiler directly, but it would be propagated to an architecture specific
//! \ref BaseAssembler implementation it creates during \ref BaseEmitter::finalize().
ASMJIT_API void addDiagnosticOptions(DiagnosticOptions options) noexcept;
//! Deactivates the given validation `options`.
//!
//! See \ref addValidationOptions() and \ref ValidationOptions for more details.
ASMJIT_API void clearValidationOptions(uint32_t options) noexcept;
//! See \ref addDiagnosticOptions() and \ref DiagnosticOptions for more details.
ASMJIT_API void clearDiagnosticOptions(DiagnosticOptions options) noexcept;
//! \}
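A minimal sketch (not part of the diff) of the two validation modes described above, applied to a Compiler; the helper name is hypothetical:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: intermediate validation checks instructions as InstNodes are created,
// while assembler validation is propagated to the assembler created by finalize().
static void enableStrictValidation(x86::Compiler& cc) {
  cc.addDiagnosticOptions(DiagnosticOptions::kValidateIntermediate |
                          DiagnosticOptions::kValidateAssembler);
}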
@@ -444,20 +450,19 @@ public:
//! Returns forced instruction options.
//!
//! Forced instruction options are merged with next instruction options before
//! the instruction is encoded. These options have some bits reserved that are
//! used by error handling, logging, and instruction validation purposes. Other
//! options are globals that affect each instruction.
inline uint32_t forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Forced instruction options are merged with next instruction options before the instruction is encoded. These
//! options have some bits reserved that are used for error handling, logging, and instruction validation purposes.
//! Other options are globals that affect each instruction.
inline InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; }
//! Returns options of the next instruction.
inline uint32_t instOptions() const noexcept { return _instOptions; }
inline InstOptions instOptions() const noexcept { return _instOptions; }
//! Sets options of the next instruction.
inline void setInstOptions(uint32_t options) noexcept { _instOptions = options; }
inline void setInstOptions(InstOptions options) noexcept { _instOptions = options; }
//! Adds options of the next instruction.
inline void addInstOptions(uint32_t options) noexcept { _instOptions |= options; }
inline void addInstOptions(InstOptions options) noexcept { _instOptions |= options; }
//! Resets options of the next instruction.
inline void resetInstOptions() noexcept { _instOptions = 0; }
inline void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; }
//! Tests whether the extra register operand is valid.
inline bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
@@ -474,9 +479,8 @@ public:
inline const char* inlineComment() const noexcept { return _inlineComment; }
//! Sets comment/annotation of the next instruction.
//!
//! \note This string is set back to null by `_emit()`, but until that it has
//! to remain valid as the Emitter is not required to make a copy of it (and
//! it would be slow to do that for each instruction).
//! \note This string is set back to null by `_emit()`, but until that it has to remain valid as the Emitter is not
//! required to make a copy of it (and it would be slow to do that for each instruction).
inline void setInlineComment(const char* s) noexcept { _inlineComment = s; }
//! Resets the comment/annotation to nullptr.
inline void resetInlineComment() noexcept { _inlineComment = nullptr; }
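A short sketch (not part of the diff) of the lifetime requirement noted above; the function name is made up:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: the comment string must outlive the next emit, because the emitter
// stores only the pointer and resets it in `_emit()`.
static void emitWithComment(x86::Assembler& a) {
  static const char comment[] = "clear eax";  // Outlives the emit below.
  a.setInlineComment(comment);
  a.xor_(x86::eax, x86::eax);                 // Comment is consumed here.
}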
@@ -496,19 +500,19 @@ public:
//! Creates a new label.
virtual Label newLabel() = 0;
//! Creates a new named label.
virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, uint32_t type = Label::kTypeGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) = 0;
//! Creates a new anonymous label with a name, which can only be used for debugging purposes.
inline Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); }
//! Creates a new external label.
inline Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) {
return newNamedLabel(name, nameSize, Label::kTypeExternal);
}
inline Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); }
//! Returns `Label` by `name`.
//!
//! Returns an invalid Label if the name is invalid or the label was not found.
//!
//! \note This function doesn't trigger ErrorHandler in case the name is invalid
//! or no such label exist. You must always check the validity of the `Label` returned.
//! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exists. You must
//! always check the validity of the `Label` returned.
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
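A short sketch (not part of the diff) of named-label lookup; the function name is hypothetical:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: a named label can later be looked up by labelByName(), which never
// triggers the ErrorHandler, so the result has to be checked explicitly.
static void useNamedLabel(x86::Assembler& a) {
  Label entry = a.newNamedLabel("entry");   // LabelType::kGlobal by default.
  a.bind(entry);
  a.ret();

  Label found = a.labelByName("entry");
  if (!found.isValid()) {
    // Name invalid or label not found.
  }
}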
//! Binds the `label` to the current position of the current section.
@@ -526,41 +530,39 @@ public:
//! \name Emit
//! \{
// NOTE: These `emit()` helpers are designed to address a code-bloat generated
// by C++ compilers to call a function having many arguments. Each parameter to
// `_emit()` requires some code to pass it, which means that if we default to
// 5 arguments in `_emit()` and instId the C++ compiler would have to generate
// a virtual function call having 5 parameters and additional `this` argument,
// which is quite a lot. Since by default most instructions have 2 to 3 operands
// it's better to introduce helpers that pass from 0 to 6 operands that help to
// reduce the size of emit(...) function call.
// NOTE: These `emit()` helpers are designed to address code-bloat generated by C++ compilers to call a function
// having many arguments. Each parameter to `_emit()` requires some code to pass it, which means that if we defaulted
// to 5 operand arguments in `_emit()` plus `instId`, the C++ compiler would have to generate a virtual function call
// with all of these parameters and an additional `this` argument, which is quite a lot. Since by default most
// instructions have 2 to 3 operands, it's better to introduce helpers that pass from 0 to 6 operands and help to
// reduce the size of the emit(...) function call.
//! Emits an instruction (internal).
ASMJIT_API Error _emitI(uint32_t instId);
ASMJIT_API Error _emitI(InstId instId);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0);
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1);
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
//! \overload
ASMJIT_API Error _emitI(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
//! Emits an instruction `instId` with the given `operands`.
template<typename... Args>
ASMJIT_INLINE Error emit(uint32_t instId, Args&&... operands) {
ASMJIT_FORCE_INLINE Error emit(InstId instId, Args&&... operands) {
return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
}
inline Error emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount) {
ASMJIT_FORCE_INLINE Error emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
return _emitOpArray(instId, operands, opCount);
}
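A tiny sketch (not part of the diff) of emitting by instruction id through the variadic emit(); the function name is made up:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: equivalent to a.add(x86::eax, x86::ebx), but going through emit()/_emitI().
static Error emitById(x86::Assembler& a) {
  return a.emit(x86::Inst::kIdAdd, x86::eax, x86::ebx);
}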
inline Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
ASMJIT_FORCE_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options());
setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount);
@@ -568,9 +570,9 @@ public:
//! \cond INTERNAL
//! Emits an instruction - all 6 operands must be defined.
virtual Error _emit(uint32_t instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) = 0;
virtual Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) = 0;
//! Emits instruction having operands stored in array.
ASMJIT_API virtual Error _emitOpArray(uint32_t instId, const Operand_* operands, size_t opCount);
ASMJIT_API virtual Error _emitOpArray(InstId instId, const Operand_* operands, size_t opCount);
//! \endcond
//! \}
@@ -589,9 +591,10 @@ public:
//! Aligns the current CodeBuffer position to the `alignment` specified.
//!
//! The sequence that is used to fill the gap between the aligned location
//! and the current location depends on the align `mode`, see \ref AlignMode.
virtual Error align(uint32_t alignMode, uint32_t alignment) = 0;
//! The sequence that is used to fill the gap between the aligned location and the current location depends on the
//! align `mode`, see \ref AlignMode. The `alignment` argument specifies alignment in bytes, so for example when
//! it's `32` it means that the code buffer will be aligned to `32` bytes.
virtual Error align(AlignMode alignMode, uint32_t alignment) = 0;
//! \}
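A minimal sketch (not part of the diff); it assumes AlignMode::kCode as the code-alignment counterpart of AlignMode::kData referenced later in this diff:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: pad the code buffer with an optimized NOP sequence up to a 16-byte boundary.
static Error alignLoopHeader(x86::Assembler& a) {
  return a.align(AlignMode::kCode, 16);
}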
@@ -604,49 +607,49 @@ public:
//! Embeds a typed data array.
//!
//! This is the most flexible function for embedding data, as it allows you to:
//! - Assign a `typeId` to the data, so the emitter knows the type of
//! items stored in `data`. Binary data should use \ref Type::kIdU8.
//! - Repeat the given data `repeatCount` times, so the data can be used
//! as a fill pattern for example, or as a pattern used by SIMD instructions.
virtual Error embedDataArray(uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) = 0;
//!
//! - Assign a `typeId` to the data, so the emitter knows the type of items stored in `data`. Binary data should
//! use \ref TypeId::kUInt8.
//!
//! - Repeat the given data `repeatCount` times, so the data can be used as a fill pattern for example, or as a
//! pattern used by SIMD instructions.
virtual Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) = 0;
//! Embeds int8_t `value` repeated by `repeatCount`.
inline Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI8, &value, 1, repeatCount); }
inline Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt8, &value, 1, repeatCount); }
//! Embeds uint8_t `value` repeated by `repeatCount`.
inline Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU8, &value, 1, repeatCount); }
inline Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt8, &value, 1, repeatCount); }
//! Embeds int16_t `value` repeated by `repeatCount`.
inline Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI16, &value, 1, repeatCount); }
inline Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt16, &value, 1, repeatCount); }
//! Embeds uint16_t `value` repeated by `repeatCount`.
inline Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU16, &value, 1, repeatCount); }
inline Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt16, &value, 1, repeatCount); }
//! Embeds int32_t `value` repeated by `repeatCount`.
inline Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI32, &value, 1, repeatCount); }
inline Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt32, &value, 1, repeatCount); }
//! Embeds uint32_t `value` repeated by `repeatCount`.
inline Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU32, &value, 1, repeatCount); }
inline Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt32, &value, 1, repeatCount); }
//! Embeds int64_t `value` repeated by `repeatCount`.
inline Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdI64, &value, 1, repeatCount); }
inline Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt64, &value, 1, repeatCount); }
//! Embeds uint64_t `value` repeated by `repeatCount`.
inline Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(Type::kIdU64, &value, 1, repeatCount); }
inline Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt64, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
inline Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(Type::kIdF32, &value, 1, repeatCount); }
inline Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<float>::kTypeId), &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
inline Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(Type::IdOfT<double>::kTypeId, &value, 1, repeatCount); }
inline Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<double>::kTypeId), &value, 1, repeatCount); }
//! Embeds a constant pool at the current offset by performing the following:
//! 1. Aligns by using kAlignData to the minimum `pool` alignment.
//! 1. Aligns by using AlignMode::kData to the minimum `pool` alignment.
//! 2. Binds the ConstPool label so it's bound to an aligned location.
//! 3. Emits ConstPool content.
virtual Error embedConstPool(const Label& label, const ConstPool& pool) = 0;
//! Embeds an absolute `label` address as data.
//!
//! The `dataSize` is an optional argument that can be used to specify the
//! size of the address data. If it's zero (default) the address size is
//! deduced from the target architecture (either 4 or 8 bytes).
//! The `dataSize` is an optional argument that can be used to specify the size of the address data. If it's zero
//! (default) the address size is deduced from the target architecture (either 4 or 8 bytes).
virtual Error embedLabel(const Label& label, size_t dataSize = 0) = 0;
//! Embeds a delta (distance) between the `label` and `base` calculating it
//! as `label - base`. This function was designed to make it easier to embed
//! lookup tables where each index is a relative distance of two labels.
//! Embeds a delta (distance) between the `label` and `base` calculating it as `label - base`. This function was
//! designed to make it easier to embed lookup tables where each index is a relative distance of two labels.
virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) = 0;
//! \}
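A short sketch (not part of the diff) combining the embed helpers above into a small delta-based lookup table; the function name and the sentinel value are made up:

#include <asmjit/x86.h>
using namespace asmjit;

// Hypothetical sketch: embeds 32-bit distances of two case labels relative to `base`.
static void emitJumpTable(x86::Assembler& a, const Label& base, const Label& case0, const Label& case1) {
  a.bind(base);
  a.embedLabelDelta(case0, base, 4);   // 32-bit `case0 - base`.
  a.embedLabelDelta(case1, base, 4);   // 32-bit `case1 - base`.
  a.embedUInt32(0xFFFFFFFFu);          // Hypothetical end-of-table sentinel.
}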
@@ -672,48 +675,20 @@ public:
//! Called after the emitter was detached from `CodeHolder`.
virtual Error onDetach(CodeHolder* code) noexcept = 0;
//! Called when \ref CodeHolder has updated an important setting, which
//! involves the following:
//! Called when \ref CodeHolder has updated an important setting, which involves the following:
//!
//! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been
//! called).
//! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler()
//! has been called).
//! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been called).
//!
//! This function ensures that the settings are properly propagated from
//! \ref CodeHolder to the emitter.
//! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler() has been called).
//!
//! \note This function is virtual and can be overridden, however, if you
//! do so, always call \ref BaseEmitter::onSettingsUpdated() within your
//! own implementation to ensure that the emitter is in a consistent state.
//! This function ensures that the settings are properly propagated from \ref CodeHolder to the emitter.
//!
//! \note This function is virtual and can be overridden, however, if you do so, always call \ref
//! BaseEmitter::onSettingsUpdated() within your own implementation to ensure that the emitter is
//! in a consistent state.
ASMJIT_API virtual void onSettingsUpdated() noexcept;
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("Use environment() instead")
inline CodeInfo codeInfo() const noexcept {
return CodeInfo(_environment, _code ? _code->baseAddress() : Globals::kNoBaseAddress);
}
ASMJIT_DEPRECATED("Use arch() instead")
inline uint32_t archId() const noexcept { return arch(); }
ASMJIT_DEPRECATED("Use registerSize() instead")
inline uint32_t gpSize() const noexcept { return registerSize(); }
ASMJIT_DEPRECATED("Use encodingOptions() instead")
inline uint32_t emitterOptions() const noexcept { return encodingOptions(); }
ASMJIT_DEPRECATED("Use addEncodingOptions() instead")
inline void addEmitterOptions(uint32_t options) noexcept { addEncodingOptions(options); }
ASMJIT_DEPRECATED("Use clearEncodingOptions() instead")
inline void clearEmitterOptions(uint32_t options) noexcept { clearEncodingOptions(options); }
ASMJIT_DEPRECATED("Use forcedInstOptions() instead")
inline uint32_t globalInstOptions() const noexcept { return forcedInstOptions(); }
#endif // !ASMJIT_NO_DEPRECATED
};
//! \}

View File

@@ -1,57 +1,33 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/assembler.h"
#include "../core/emitterutils_p.h"
#include "../core/formatter.h"
#include "../core/formatter_p.h"
#include "../core/logger.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::EmitterUtils]
// ============================================================================
namespace EmitterUtils {
#ifndef ASMJIT_NO_LOGGING
Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept {
size_t currentSize = sb.size();
size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
ASMJIT_ASSERT(binSize >= dispSize);
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept {
ASMJIT_ASSERT(binSize >= offsetSize);
const size_t kNoBinSize = SIZE_MAX;
size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
size_t align = kMaxInstLineSize;
char sep = ';';
size_t padding = Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
size_t begin = sb.size();
ASMJIT_PROPAGATE(sb.padEnd(align));
ASMJIT_PROPAGATE(sb.padEnd(padding));
if (sep) {
ASMJIT_PROPAGATE(sb.append(sep));
@@ -60,8 +36,8 @@ Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t disp
// Append binary data or comment.
if (i == 0) {
ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - dispSize - immSize));
ASMJIT_PROPAGATE(sb.appendChars('.', dispSize * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - offsetSize - immSize));
ASMJIT_PROPAGATE(sb.appendChars('.', offsetSize * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
if (commentSize == 0) break;
}
@@ -69,9 +45,8 @@ Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t disp
ASMJIT_PROPAGATE(sb.append(comment, commentSize));
}
currentSize += sb.size() - begin;
align += kMaxBinarySize;
sep = '|';
padding += Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kMachineCode);
}
}
@@ -82,55 +57,59 @@ void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
Logger* logger = self->logger();
StringTmp<512> sb;
size_t binSize = logger->hasFlag(FormatOptions::kFlagMachineCode) ? size_t(0) : SIZE_MAX;
size_t binSize = logger->hasFlag(FormatFlags::kMachineCode) ? size_t(0) : SIZE_MAX;
sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationLabel));
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kLabel));
Formatter::formatLabel(sb, logger->flags(), self, label.id());
sb.append(':');
EmitterUtils::formatLine(sb, nullptr, binSize, 0, 0, self->_inlineComment);
finishFormattedLine(sb, logger->options(), nullptr, binSize, 0, 0, self->_inlineComment);
logger->log(sb.data(), sb.size());
}
void logInstructionEmitted(
BaseAssembler* self,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
Logger* logger = self->logger();
ASMJIT_ASSERT(logger != nullptr);
StringTmp<256> sb;
uint32_t flags = logger->flags();
FormatFlags formatFlags = logger->flags();
uint8_t* beforeCursor = self->bufferPtr();
intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
sb.appendChars(' ', logger->indentation(FormatOptions::kIndentationCode));
Formatter::formatInstruction(sb, flags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode));
Formatter::formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if ((flags & FormatOptions::kFlagMachineCode) != 0)
EmitterUtils::formatLine(sb, self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
if (Support::test(formatFlags, FormatFlags::kMachineCode))
finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
else
EmitterUtils::formatLine(sb, nullptr, SIZE_MAX, 0, 0, self->inlineComment());
finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment());
logger->log(sb);
}
Error logInstructionFailed(
BaseAssembler* self,
Error err,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
StringTmp<256> sb;
sb.append(DebugUtils::errorAsString(err));
sb.append(": ");
Operand_ opArray[Globals::kMaxOpCount];
EmitterUtils::opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
Formatter::formatInstruction(sb, 0, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
Formatter::formatInstruction(sb, FormatFlags::kNone, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
if (self->inlineComment()) {
sb.append(" ; ");

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
#define ASMJIT_CORE_EMITTERUTILS_P_H_INCLUDED
@@ -30,26 +12,26 @@
ASMJIT_BEGIN_NAMESPACE
class BaseAssembler;
class FormatOptions;
//! \cond INTERNAL
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::EmitterUtils]
// ============================================================================
//! Utilities used by various emitters, mostly Assembler implementations.
namespace EmitterUtils {
static const Operand_ noExt[3] {};
//! Default paddings used by Emitter utils and Formatter.
enum kOpIndex {
static constexpr Operand noExt[3];
enum kOpIndex : uint32_t {
kOp3 = 0,
kOp4 = 1,
kOp5 = 2
};
static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
static ASMJIT_FORCE_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0;
if (opExt[kOp3].isNone()) {
@@ -67,7 +49,7 @@ static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Oper
return opCount;
}
static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
static ASMJIT_FORCE_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0);
dst[1].copyFrom(o1);
dst[2].copyFrom(o2);
@@ -77,25 +59,23 @@ static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount]
}
#ifndef ASMJIT_NO_LOGGING
enum : uint32_t {
// Has to be big to be able to hold all metadata compiler can assign to a
// single instruction.
kMaxInstLineSize = 44,
kMaxBinarySize = 26
};
Error formatLine(String& sb, const uint8_t* binData, size_t binSize, size_t dispSize, size_t immSize, const char* comment) noexcept;
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept;
void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
void logInstructionEmitted(
BaseAssembler* self,
uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
Error logInstructionFailed(
BaseAssembler* self,
Error err, uint32_t instId, uint32_t options, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
Error err,
InstId instId,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
#endif
}

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/environment.h"

View File

@@ -1,30 +1,12 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#define ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
#include "../core/globals.h"
#include "../core/archtraits.h"
#if defined(__APPLE__)
#include <TargetConditionals.h>
@@ -35,301 +17,222 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::Environment]
// ============================================================================
//! Vendor.
//!
//! \note AsmJit doesn't use vendor information at the moment. It's provided for future use, if required.
enum class Vendor : uint8_t {
//! Unknown or uninitialized platform vendor.
kUnknown = 0,
//! Maximum value of `Vendor`.
kMaxValue = kUnknown,
//! Platform vendor detected at compile-time.
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#else
kUnknown
#endif
};
//! Platform - runtime environment or operating system.
enum class Platform : uint8_t {
//! Unknown or uninitialized platform.
kUnknown = 0,
//! Windows OS.
kWindows,
//! Other platform that is not Windows, most likely POSIX based.
kOther,
//! Linux OS.
kLinux,
//! GNU/Hurd OS.
kHurd,
//! FreeBSD OS.
kFreeBSD,
//! OpenBSD OS.
kOpenBSD,
//! NetBSD OS.
kNetBSD,
//! DragonFly BSD OS.
kDragonFlyBSD,
//! Haiku OS.
kHaiku,
//! Apple OSX.
kOSX,
//! Apple iOS.
kIOS,
//! Apple TVOS.
kTVOS,
//! Apple WatchOS.
kWatchOS,
//! Emscripten platform.
kEmscripten,
//! Maximum value of `Platform`.
kMaxValue = kEmscripten,
//! Platform detected at compile-time (platform of the host).
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#elif defined(__EMSCRIPTEN__)
kEmscripten
#elif defined(_WIN32)
kWindows
#elif defined(__linux__)
kLinux
#elif defined(__gnu_hurd__)
kHurd
#elif defined(__FreeBSD__)
kFreeBSD
#elif defined(__OpenBSD__)
kOpenBSD
#elif defined(__NetBSD__)
kNetBSD
#elif defined(__DragonFly__)
kDragonFlyBSD
#elif defined(__HAIKU__)
kHaiku
#elif defined(__APPLE__) && TARGET_OS_OSX
kOSX
#elif defined(__APPLE__) && TARGET_OS_TV
kTVOS
#elif defined(__APPLE__) && TARGET_OS_WATCH
kWatchOS
#elif defined(__APPLE__) && TARGET_OS_IPHONE
kIOS
#else
kOther
#endif
};
//! Platform ABI (application binary interface).
enum class PlatformABI : uint8_t {
//! Unknown or uninitialized environment.
kUnknown = 0,
//! Microsoft ABI.
kMSVC,
//! GNU ABI.
kGNU,
//! Android Environment / ABI.
kAndroid,
//! Cygwin ABI.
kCygwin,
//! Maximum value of `PlatformABI`.
kMaxValue,
//! Host ABI detected at compile-time.
kHost =
#if defined(_DOXYGEN)
DETECTED_AT_COMPILE_TIME
#elif defined(_MSC_VER)
kMSVC
#elif defined(__CYGWIN__)
kCygwin
#elif defined(__MINGW32__) || defined(__GLIBC__)
kGNU
#elif defined(__ANDROID__)
kAndroid
#else
kUnknown
#endif
};
//! Object format.
//!
//! \note AsmJit doesn't really use anything except \ref ObjectFormat::kUnknown and \ref ObjectFormat::kJIT at
//! the moment. Object file formats are provided for future extensibility and a possibility to generate object
//! files at some point.
enum class ObjectFormat : uint8_t {
//! Unknown or uninitialized object format.
kUnknown = 0,
//! JIT code generation object, most likely \ref JitRuntime or a custom
//! \ref Target implementation.
kJIT,
//! Executable and linkable format (ELF).
kELF,
//! Common object file format.
kCOFF,
//! Extended COFF object format.
kXCOFF,
//! Mach object file format.
kMachO,
//! Maximum value of `ObjectFormat`.
kMaxValue
};
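Since the kHost values above are compile-time constants, host checks can be constexpr; a minimal sketch (not part of the diff):

#include <asmjit/core.h>
using namespace asmjit;

// Hypothetical sketch: true when compiled for Windows with the Microsoft ABI.
static constexpr bool kHostIsWindowsMSVC =
  Platform::kHost == Platform::kWindows && PlatformABI::kHost == PlatformABI::kMSVC;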
//! Represents an environment, which is usually related to a \ref Target.
//!
//! Environment has usually an 'arch-subarch-vendor-os-abi' format, which is
//! sometimes called "Triple" (historically it used to be 3 only parts) or
//! "Tuple", which is a convention used by Debian Linux.
//! An environment usually has an 'arch-subarch-vendor-os-abi' format, which is sometimes called a "Triple"
//! (historically it used to be only 3 parts) or a "Tuple", which is a convention used by Debian Linux.
//!
//! AsmJit doesn't support all possible combinations or architectures and ABIs,
//! however, it models the environment similarly to other compilers for future
//! extensibility.
//! AsmJit doesn't support all possible combinations of architectures and ABIs; however, it models the environment
//! similarly to other compilers for future extensibility.
class Environment {
public:
//! Architecture type, see \ref Arch.
uint8_t _arch;
//! Sub-architecture type, see \ref SubArch.
uint8_t _subArch;
//! Vendor type, see \ref Vendor.
uint8_t _vendor;
//! Platform type, see \ref Platform.
uint8_t _platform;
//! ABI type, see \ref Abi.
uint8_t _abi;
//! Object format, see \ref Format.
uint8_t _format;
//! Reserved for future use, must be zero.
uint16_t _reserved;
//! Architecture.
enum Arch : uint32_t {
//! Unknown or uninitialized architecture.
kArchUnknown = 0,
//! Mask used by 32-bit architectures (odd are 32-bit, even are 64-bit).
kArch32BitMask = 0x01,
//! Mask used by big-endian architectures.
kArchBigEndianMask = 0x80u,
//! 32-bit X86 architecture.
kArchX86 = 1,
//! 64-bit X86 architecture also known as X86_64 and AMD64.
kArchX64 = 2,
//! 32-bit RISC-V architecture.
kArchRISCV32 = 3,
//! 64-bit RISC-V architecture.
kArchRISCV64 = 4,
//! 32-bit ARM architecture (little endian).
kArchARM = 5,
//! 32-bit ARM architecture (big endian).
kArchARM_BE = kArchARM | kArchBigEndianMask,
//! 64-bit ARM architecture (little endian).
kArchAArch64 = 6,
//! 64-bit ARM architecture (big endian).
kArchAArch64_BE = kArchAArch64 | kArchBigEndianMask,
//! 32-bit ARM in Thumb mode (little endian).
kArchThumb = 7,
//! 32-bit ARM in Thumb mode (big endian).
kArchThumb_BE = kArchThumb | kArchBigEndianMask,
// 8 is not used, even numbers are 64-bit architectures.
//! 32-bit MIPS architecture (little endian).
kArchMIPS32_LE = 9,
//! 32-bit MIPS architecture (big endian).
kArchMIPS32_BE = kArchMIPS32_LE | kArchBigEndianMask,
//! 64-bit MIPS architecture (little endian).
kArchMIPS64_LE = 10,
//! 64-bit MIPS architecture (big endian).
kArchMIPS64_BE = kArchMIPS64_LE | kArchBigEndianMask,
//! Count of architectures.
kArchCount = 11
};
//! Sub-architecture.
enum SubArch : uint32_t {
//! Unknown or uninitialized architecture sub-type.
kSubArchUnknown = 0,
//! Count of sub-architectures.
kSubArchCount
};
//! Vendor.
//!
//! \note AsmJit doesn't use vendor information at the moment. It's provided
//! for future use, if required.
enum Vendor : uint32_t {
//! Unknown or uninitialized vendor.
kVendorUnknown = 0,
//! Count of vendor identifiers.
kVendorCount
};
//! Platform / OS.
enum Platform : uint32_t {
//! Unknown or uninitialized platform.
kPlatformUnknown = 0,
//! Windows OS.
kPlatformWindows,
//! Other platform, most likely POSIX based.
kPlatformOther,
//! Linux OS.
kPlatformLinux,
//! GNU/Hurd OS.
kPlatformHurd,
//! FreeBSD OS.
kPlatformFreeBSD,
//! OpenBSD OS.
kPlatformOpenBSD,
//! NetBSD OS.
kPlatformNetBSD,
//! DragonFly BSD OS.
kPlatformDragonFlyBSD,
//! Haiku OS.
kPlatformHaiku,
//! Apple OSX.
kPlatformOSX,
//! Apple iOS.
kPlatformIOS,
//! Apple TVOS.
kPlatformTVOS,
//! Apple WatchOS.
kPlatformWatchOS,
//! Emscripten platform.
kPlatformEmscripten,
//! Count of platform identifiers.
kPlatformCount
};
//! ABI.
enum Abi : uint32_t {
//! Unknown or uninitialized ABI.
kAbiUnknown = 0,
//! Microsoft ABI.
kAbiMSVC,
//! GNU ABI.
kAbiGNU,
//! Android Environment / ABI.
kAbiAndroid,
//! Cygwin ABI.
kAbiCygwin,
//! Count of known ABI types.
kAbiCount
};
//! Object format.
//!
//! \note AsmJit doesn't really use anything except \ref kFormatUnknown and
//! \ref kFormatJIT at the moment. Object file formats are provided for
//! future extensibility and a possibility to generate object files at some
//! point.
enum Format : uint32_t {
//! Unknown or uninitialized object format.
kFormatUnknown = 0,
//! JIT code generation object, most likely \ref JitRuntime or a custom
//! \ref Target implementation.
kFormatJIT,
//! Executable and linkable format (ELF).
kFormatELF,
//! Common object file format.
kFormatCOFF,
//! Extended COFF object format.
kFormatXCOFF,
//! Mach object file format.
kFormatMachO,
//! Count of object format types.
kFormatCount
};
//! \name Environment Detection
//! \name Members
//! \{
#ifdef _DOXYGEN
//! Architecture detected at compile-time (architecture of the host).
static constexpr Arch kArchHost = DETECTED_AT_COMPILE_TIME;
//! Sub-architecture detected at compile-time (sub-architecture of the host).
static constexpr SubArch kSubArchHost = DETECTED_AT_COMPILE_TIME;
//! Vendor detected at compile-time (vendor of the host).
static constexpr Vendor kVendorHost = DETECTED_AT_COMPILE_TIME;
//! Platform detected at compile-time (platform of the host).
static constexpr Platform kPlatformHost = DETECTED_AT_COMPILE_TIME;
//! ABI detected at compile-time (ABI of the host).
static constexpr Abi kAbiHost = DETECTED_AT_COMPILE_TIME;
#else
static constexpr Arch kArchHost =
ASMJIT_ARCH_X86 == 32 ? kArchX86 :
ASMJIT_ARCH_X86 == 64 ? kArchX64 :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kArchARM :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kArchARM_BE :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kArchAArch64 :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kArchAArch64_BE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kArchMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kArchMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kArchMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kArchMIPS64_BE :
kArchUnknown;
static constexpr SubArch kSubArchHost =
kSubArchUnknown;
static constexpr Vendor kVendorHost =
kVendorUnknown;
static constexpr Platform kPlatformHost =
#if defined(__EMSCRIPTEN__)
kPlatformEmscripten
#elif defined(_WIN32)
kPlatformWindows
#elif defined(__linux__)
kPlatformLinux
#elif defined(__gnu_hurd__)
kPlatformHurd
#elif defined(__FreeBSD__)
kPlatformFreeBSD
#elif defined(__OpenBSD__)
kPlatformOpenBSD
#elif defined(__NetBSD__)
kPlatformNetBSD
#elif defined(__DragonFly__)
kPlatformDragonFlyBSD
#elif defined(__HAIKU__)
kPlatformHaiku
#elif defined(__APPLE__) && TARGET_OS_OSX
kPlatformOSX
#elif defined(__APPLE__) && TARGET_OS_TV
kPlatformTVOS
#elif defined(__APPLE__) && TARGET_OS_WATCH
kPlatformWatchOS
#elif defined(__APPLE__) && TARGET_OS_IPHONE
kPlatformIOS
#else
kPlatformOther
#endif
;
static constexpr Abi kAbiHost =
#if defined(_MSC_VER)
kAbiMSVC
#elif defined(__CYGWIN__)
kAbiCygwin
#elif defined(__MINGW32__) || defined(__GLIBC__)
kAbiGNU
#elif defined(__ANDROID__)
kAbiAndroid
#else
kAbiUnknown
#endif
;
#endif
//! Architecture.
Arch _arch;
//! Sub-architecture type.
SubArch _subArch;
//! Vendor type.
Vendor _vendor;
//! Platform.
Platform _platform;
//! Platform ABI.
PlatformABI _platformABI;
//! Object format.
ObjectFormat _objectFormat;
//! Reserved for future use, must be zero.
uint8_t _reserved[2];
//! \}
//! \name Construction / Destruction
//! \name Construction & Destruction
//! \{
inline Environment() noexcept :
_arch(uint8_t(kArchUnknown)),
_subArch(uint8_t(kSubArchUnknown)),
_vendor(uint8_t(kVendorUnknown)),
_platform(uint8_t(kPlatformUnknown)),
_abi(uint8_t(kAbiUnknown)),
_format(uint8_t(kFormatUnknown)),
_reserved(0) {}
_arch(Arch::kUnknown),
_subArch(SubArch::kUnknown),
_vendor(Vendor::kUnknown),
_platform(Platform::kUnknown),
_platformABI(PlatformABI::kUnknown),
_objectFormat(ObjectFormat::kUnknown),
_reserved { 0, 0 } {}
inline explicit Environment(
Arch arch,
SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI abi = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown) noexcept {
init(arch, subArch, vendor, platform, abi, objectFormat);
}
inline Environment(const Environment& other) noexcept = default;
inline explicit Environment(uint32_t arch,
uint32_t subArch = kSubArchUnknown,
uint32_t vendor = kVendorUnknown,
uint32_t platform = kPlatformUnknown,
uint32_t abi = kAbiUnknown,
uint32_t format = kFormatUnknown) noexcept {
init(arch, subArch, vendor, platform, abi, format);
//! Returns the host environment constructed from preprocessor macros defined by the compiler.
//!
//! The returned environment should precisely match the target host architecture, sub-architecture, platform,
//! and ABI.
static inline Environment host() noexcept {
return Environment(Arch::kHost, SubArch::kHost, Vendor::kHost, Platform::kHost, PlatformABI::kHost, ObjectFormat::kUnknown);
}
//! \}
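A hedged sketch of querying the host environment through the strong-typed accessors shown in this hunk; the printed strings and the umbrella include are illustrative:

```
#include <asmjit/core.h>
#include <cstdio>

int main() {
  asmjit::Environment env = asmjit::Environment::host();

  // arch() returns the strong-typed Arch value; the static helpers accept it directly.
  std::printf("64-bit host : %s\n",
              asmjit::Environment::is64Bit(env.arch()) ? "yes" : "no");
  std::printf("windows host: %s\n", env.isPlatformWindows() ? "yes" : "no");
  std::printf("apple host  : %s\n", env.isPlatformApple() ? "yes" : "no");
  return 0;
}
```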
@@ -358,7 +261,7 @@ public:
//! Tests whether the environment is initialized, which means it must have
//! a valid architecture.
inline bool isInitialized() const noexcept {
return _arch != kArchUnknown;
return _arch != Arch::kUnknown;
}
inline uint64_t _packed() const noexcept {
@@ -369,56 +272,60 @@ public:
//! Resets all members of the environment to zero / unknown.
inline void reset() noexcept {
_arch = uint8_t(kArchUnknown);
_subArch = uint8_t(kSubArchUnknown);
_vendor = uint8_t(kVendorUnknown);
_platform = uint8_t(kPlatformUnknown);
_abi = uint8_t(kAbiUnknown);
_format = uint8_t(kFormatUnknown);
_reserved = 0;
_arch = Arch::kUnknown;
_subArch = SubArch::kUnknown;
_vendor = Vendor::kUnknown;
_platform = Platform::kUnknown;
_platformABI = PlatformABI::kUnknown;
_objectFormat = ObjectFormat::kUnknown;
_reserved[0] = 0;
_reserved[1] = 0;
}
inline bool equals(const Environment& other) const noexcept {
return _packed() == other._packed();
}
//! Returns the architecture, see \ref Arch.
inline uint32_t arch() const noexcept { return _arch; }
//! Returns the sub-architecture, see \ref SubArch.
inline uint32_t subArch() const noexcept { return _subArch; }
//! Returns vendor, see \ref Vendor.
inline uint32_t vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system, see \ref Platform.
inline uint32_t platform() const noexcept { return _platform; }
//! Returns target's ABI, see \ref Abi.
inline uint32_t abi() const noexcept { return _abi; }
//! Returns target's object format, see \ref Format.
inline uint32_t format() const noexcept { return _format; }
//! Returns the architecture.
inline Arch arch() const noexcept { return _arch; }
//! Returns the sub-architecture.
inline SubArch subArch() const noexcept { return _subArch; }
//! Returns vendor.
inline Vendor vendor() const noexcept { return _vendor; }
//! Returns target's platform or operating system.
inline Platform platform() const noexcept { return _platform; }
//! Returns target's ABI.
inline PlatformABI platformABI() const noexcept { return _platformABI; }
//! Returns target's object format.
inline ObjectFormat objectFormat() const noexcept { return _objectFormat; }
inline void init(uint32_t arch,
uint32_t subArch = kSubArchUnknown,
uint32_t vendor = kVendorUnknown,
uint32_t platform = kPlatformUnknown,
uint32_t abi = kAbiUnknown,
uint32_t format = kFormatUnknown) noexcept {
_arch = uint8_t(arch);
_subArch = uint8_t(subArch);
_vendor = uint8_t(vendor);
_platform = uint8_t(platform);
_abi = uint8_t(abi);
_format = uint8_t(format);
_reserved = 0;
inline void init(
Arch arch,
SubArch subArch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI platformABI = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown) noexcept {
_arch = arch;
_subArch = subArch;
_vendor = vendor;
_platform = platform;
_platformABI = platformABI;
_objectFormat = objectFormat;
_reserved[0] = 0;
_reserved[1] = 0;
}
inline bool isArchX86() const noexcept { return _arch == kArchX86; }
inline bool isArchX64() const noexcept { return _arch == kArchX64; }
inline bool isArchRISCV32() const noexcept { return _arch == kArchRISCV32; }
inline bool isArchRISCV64() const noexcept { return _arch == kArchRISCV64; }
inline bool isArchARM() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchARM; }
inline bool isArchThumb() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchThumb; }
inline bool isArchAArch64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchAArch64; }
inline bool isArchMIPS32() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS32_LE; }
inline bool isArchMIPS64() const noexcept { return (_arch & ~kArchBigEndianMask) == kArchMIPS64_LE; }
inline bool isArchX86() const noexcept { return _arch == Arch::kX86; }
inline bool isArchX64() const noexcept { return _arch == Arch::kX64; }
inline bool isArchARM() const noexcept { return isArchARM(_arch); }
inline bool isArchThumb() const noexcept { return isArchThumb(_arch); }
inline bool isArchAArch64() const noexcept { return isArchAArch64(_arch); }
inline bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); }
inline bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); }
inline bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; }
inline bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; }
//! Tests whether the architecture is 32-bit.
inline bool is32Bit() const noexcept { return is32Bit(_arch); }
@@ -432,45 +339,45 @@ public:
//! Tests whether this architecture is of X86 family.
inline bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
inline bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether this architecture family is ARM, Thumb, or AArch64.
inline bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
//! Tests whether this architecture family is MIPS32 or MIPS64.
inline bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
inline bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
//! Tests whether the environment platform is Windows.
inline bool isPlatformWindows() const noexcept { return _platform == kPlatformWindows; }
inline bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; }
//! Tests whether the environment platform is Linux.
inline bool isPlatformLinux() const noexcept { return _platform == kPlatformLinux; }
inline bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; }
//! Tests whether the environment platform is Hurd.
inline bool isPlatformHurd() const noexcept { return _platform == kPlatformHurd; }
inline bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; }
//! Tests whether the environment platform is Haiku.
inline bool isPlatformHaiku() const noexcept { return _platform == kPlatformHaiku; }
inline bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; }
//! Tests whether the environment platform is any BSD.
inline bool isPlatformBSD() const noexcept {
return _platform == kPlatformFreeBSD ||
_platform == kPlatformOpenBSD ||
_platform == kPlatformNetBSD ||
_platform == kPlatformDragonFlyBSD;
return _platform == Platform::kFreeBSD ||
_platform == Platform::kOpenBSD ||
_platform == Platform::kNetBSD ||
_platform == Platform::kDragonFlyBSD;
}
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
inline bool isPlatformApple() const noexcept {
return _platform == kPlatformOSX ||
_platform == kPlatformIOS ||
_platform == kPlatformTVOS ||
_platform == kPlatformWatchOS;
return _platform == Platform::kOSX ||
_platform == Platform::kIOS ||
_platform == Platform::kTVOS ||
_platform == Platform::kWatchOS;
}
//! Tests whether the ABI is MSVC.
inline bool isAbiMSVC() const noexcept { return _abi == kAbiMSVC; }
inline bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; }
//! Tests whether the ABI is GNU.
inline bool isAbiGNU() const noexcept { return _abi == kAbiGNU; }
inline bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; }
//! Returns a calculated stack alignment for this environment.
ASMJIT_API uint32_t stackAlignment() const noexcept;
@@ -479,134 +386,109 @@ public:
uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
//! Sets the architecture to `arch`.
inline void setArch(uint32_t arch) noexcept { _arch = uint8_t(arch); }
inline void setArch(Arch arch) noexcept { _arch = arch; }
//! Sets the sub-architecture to `subArch`.
inline void setSubArch(uint32_t subArch) noexcept { _subArch = uint8_t(subArch); }
inline void setSubArch(SubArch subArch) noexcept { _subArch = subArch; }
//! Sets the vendor to `vendor`.
inline void setVendor(uint32_t vendor) noexcept { _vendor = uint8_t(vendor); }
inline void setVendor(Vendor vendor) noexcept { _vendor = vendor; }
//! Sets the platform to `platform`.
inline void setPlatform(uint32_t platform) noexcept { _platform = uint8_t(platform); }
//! Sets the ABI to `abi`.
inline void setAbi(uint32_t abi) noexcept { _abi = uint8_t(abi); }
//! Sets the object format to `format`.
inline void setFormat(uint32_t format) noexcept { _format = uint8_t(format); }
inline void setPlatform(Platform platform) noexcept { _platform = platform; }
//! Sets the ABI to `platformABI`.
inline void setPlatformABI(PlatformABI platformABI) noexcept { _platformABI = platformABI; }
//! Sets the object format to `objectFormat`.
inline void setObjectFormat(ObjectFormat objectFormat) noexcept { _objectFormat = objectFormat; }
//! \}
//! \name Static Utilities
//! \{
static inline bool isValidArch(uint32_t arch) noexcept {
return (arch & ~kArchBigEndianMask) != 0 &&
(arch & ~kArchBigEndianMask) < kArchCount;
static inline bool isDefinedArch(Arch arch) noexcept {
return uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
static inline bool isValidArch(Arch arch) noexcept {
return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
//! Tests whether the given architecture `arch` is 32-bit.
static inline bool is32Bit(uint32_t arch) noexcept {
return (arch & kArch32BitMask) == kArch32BitMask;
static inline bool is32Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask);
}
//! Tests whether the given architecture `arch` is 64-bit.
static inline bool is64Bit(uint32_t arch) noexcept {
return (arch & kArch32BitMask) == 0;
static inline bool is64Bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0;
}
//! Tests whether the given architecture `arch` is little endian.
static inline bool isLittleEndian(uint32_t arch) noexcept {
return (arch & kArchBigEndianMask) == 0;
static inline bool isLittleEndian(Arch arch) noexcept {
return uint32_t(arch) < uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture `arch` is big endian.
static inline bool isBigEndian(uint32_t arch) noexcept {
return (arch & kArchBigEndianMask) == kArchBigEndianMask;
static inline bool isBigEndian(Arch arch) noexcept {
return uint32_t(arch) >= uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture is AArch64.
static inline bool isArchAArch64(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchAArch64;
//! Tests whether the given architecture is ARM or ARM_BE.
static inline bool isArchARM(Arch arch) noexcept {
return arch == Arch::kARM || arch == Arch::kARM_BE;
}
//! Tests whether the given architecture is Thumb or Thumb_BE.
static inline bool isArchThumb(Arch arch) noexcept {
return arch == Arch::kThumb || arch == Arch::kThumb_BE;
}
//! Tests whether the given architecture is AArch64 or AArch64_BE.
static inline bool isArchAArch64(Arch arch) noexcept {
return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE;
}
//! Tests whether the given architecture is MIPS32_LE or MIPS32_BE.
static inline bool isArchMIPS32(Arch arch) noexcept {
return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE;
}
//! Tests whether the given architecture is MIPS64_LE or MIPS64_BE.
static inline bool isArchMIPS64(Arch arch) noexcept {
return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE;
}
//! Tests whether the given architecture family is X86 or X64.
static inline bool isFamilyX86(uint32_t arch) noexcept {
return arch == kArchX86 ||
arch == kArchX64;
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
static inline bool isFamilyRISCV(uint32_t arch) noexcept {
return arch == kArchRISCV32 ||
arch == kArchRISCV64;
static inline bool isFamilyX86(Arch arch) noexcept {
return arch == Arch::kX86 || arch == Arch::kX64;
}
//! Tests whether the given architecture family is ARM, Thumb, or AArch64.
static inline bool isFamilyARM(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchARM ||
arch == kArchAArch64 ||
arch == kArchThumb;
static inline bool isFamilyARM(Arch arch) noexcept {
return isArchARM(arch) || isArchAArch64(arch) || isArchThumb(arch);
}
//! Tests whether the given architecture family is MIPS32 or MIPS64.
static inline bool isFamilyMIPS(uint32_t arch) noexcept {
arch &= ~kArchBigEndianMask;
return arch == kArchMIPS32_LE ||
arch == kArchMIPS64_LE;
static inline bool isFamilyMIPS(Arch arch) noexcept {
return isArchMIPS32(arch) || isArchMIPS64(arch);
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
static inline bool isFamilyRISCV(Arch arch) noexcept {
return arch == Arch::kRISCV32 || arch == Arch::kRISCV64;
}
//! Returns a native general purpose register size from the given architecture.
static uint32_t registerSizeFromArch(uint32_t arch) noexcept {
static inline uint32_t registerSizeFromArch(Arch arch) noexcept {
return is32Bit(arch) ? 4u : 8u;
}
//! \}
};
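A minimal sketch of building an Environment explicitly with the strong-typed enums and the init() overload shown above; the helper names and the chosen target are hypothetical:

```
#include <asmjit/core.h>

using namespace asmjit;

// Hypothetical helper describing a JIT target for 64-bit Linux.
static Environment makeLinuxX64Environment() {
  Environment env;
  env.init(Arch::kX64, SubArch::kUnknown, Vendor::kUnknown,
           Platform::kLinux, PlatformABI::kGNU, ObjectFormat::kJIT);
  return env;
}

// Family helpers take the strong-typed Arch value returned by arch().
static bool isSupportedTarget(const Environment& env) {
  return Environment::isFamilyX86(env.arch()) || Environment::isFamilyARM(env.arch());
}
```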
//! Returns the host environment constructed from preprocessor macros defined
//! by the compiler.
//!
//! The returned environment should precisely match the target host architecture,
//! sub-architecture, platform, and ABI.
static ASMJIT_INLINE Environment hostEnvironment() noexcept {
return Environment(Environment::kArchHost,
Environment::kSubArchHost,
Environment::kVendorHost,
Environment::kPlatformHost,
Environment::kAbiHost,
Environment::kFormatUnknown);
}
static_assert(sizeof(Environment) == 8,
"Environment must occupy exactly 8 bytes.");
//! \}
#ifndef ASMJIT_NO_DEPRECATED
class ASMJIT_DEPRECATED_STRUCT("Use Environment instead") ArchInfo : public Environment {
public:
inline ArchInfo() noexcept : Environment() {}
inline ArchInfo(const Environment& other) noexcept : Environment(other) {}
inline explicit ArchInfo(uint32_t arch, uint32_t subArch = kSubArchUnknown) noexcept
: Environment(arch, subArch) {}
enum Id : uint32_t {
kIdNone = Environment::kArchUnknown,
kIdX86 = Environment::kArchX86,
kIdX64 = Environment::kArchX64,
kIdA32 = Environment::kArchARM,
kIdA64 = Environment::kArchAArch64,
kIdHost = Environment::kArchHost
};
enum SubType : uint32_t {
kSubIdNone = Environment::kSubArchUnknown
};
static inline ArchInfo host() noexcept { return ArchInfo(hostEnvironment()); }
};
#endif // !ASMJIT_NO_DEPRECATED
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ENVIRONMENT_H_INCLUDED
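Since this refactor breaks the API, existing call sites need a mechanical update from the nested plain-enum constants to the namespace-level strong-typed enums. A hedged before/after sketch; only one of the two functions compiles against a given revision:

```
#include <asmjit/core.h>

// Pre-refactor constants nested in Environment.
void configureBefore(asmjit::Environment& env) {
  env.setArch(asmjit::Environment::kArchX64);
  env.setPlatform(asmjit::Environment::kPlatformLinux);
}

// Post-refactor strong-typed enums at namespace scope.
void configureAfter(asmjit::Environment& env) {
  env.setArch(asmjit::Arch::kX64);
  env.setPlatform(asmjit::Platform::kLinux);
}
```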

View File

@@ -1,36 +1,13 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/errorhandler.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
#define ASMJIT_CORE_ERRORHANDLER_H_INCLUDED
@@ -31,41 +13,28 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_error_handling
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class BaseEmitter;
// ============================================================================
// [asmjit::ErrorHandler]
// ============================================================================
//! Error handler can be used to override the default behavior of error handling.
//!
//! It's available to all classes that inherit `BaseEmitter`. Override
//! \ref ErrorHandler::handleError() to implement your own error handler.
//! It's available to all classes that inherit `BaseEmitter`. Override \ref ErrorHandler::handleError() to implement
//! your own error handler.
//!
//! The following use-cases are supported:
//!
//! - Record the error and continue code generation. This is the simplest
//! approach that can be used to at least log possible errors.
//! - Throw an exception. AsmJit doesn't use exceptions and is completely
//! exception-safe, but it's perfectly legal to throw an exception from
//! the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler,
//! Builder and Compiler to a consistent state before calling \ref handleError(),
//! so `longjmp()` can be used without issues to cancel the code-generation if
//! an error occurred. This method can be used if exception handling in your
//! project is turned off and you still want some comfort. In most cases it
//! should be safe as AsmJit uses \ref Zone memory and the ownership of memory
//! it allocates always ends with the instance that allocated it. If using this
//! approach please never jump outside the life-time of \ref CodeHolder and
//! \ref BaseEmitter.
//! - Record the error and continue code generation. This is the simplest approach that can be used to at least log
//! possible errors.
//! - Throw an exception. AsmJit doesn't use exceptions and is completely exception-safe, but it's perfectly legal
//! to throw an exception from the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler, Builder and Compiler to
//! a consistent state before calling \ref handleError(), so `longjmp()` can be used without issues to cancel the
//! code generation if an error occurred. This method can be used if exception handling in your project is turned
//! off and you still want some comfort. In most cases it should be safe as AsmJit uses \ref Zone memory and the
//! ownership of memory it allocates always ends with the instance that allocated it. If using this approach please
//! never jump outside the life-time of \ref CodeHolder and \ref BaseEmitter.
//!
//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter
//! (the emitter's handler takes priority). The example below uses an error
//! handler that just prints the error, but lets AsmJit continue:
//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter (the emitter's handler takes priority).
//! The example below uses an error handler that just prints the error, but lets AsmJit continue:
//!
//! ```
//! // Error Handling #1 - Logging and returning Error.
@@ -108,12 +77,10 @@ class BaseEmitter;
//! }
//! ```
//!
//! If an error happens during instruction emitting / encoding, the assembler behaves
//! transactionally - the output buffer won't advance if encoding fails, thus
//! either a fully encoded instruction or nothing is emitted. The error handling
//! shown above is useful, but it's still not the best way of dealing with errors
//! in AsmJit. The following example shows how to use exception handling to handle
//! errors in a more C++ way:
//! If an error happens during instruction emitting / encoding, the assembler behaves transactionally - the output
//! buffer won't advance if encoding fails, thus either a fully encoded instruction or nothing is emitted. The error
//! handling shown above is useful, but it's still not the best way of dealing with errors in AsmJit. The following
//! example shows how to use exception handling to handle errors in a more C++ way:
//!
//! ```
//! // Error Handling #2 - Throwing an exception.
@@ -168,13 +135,10 @@ class BaseEmitter;
//! }
//! ```
//!
//! If C++ exceptions are not what you like, or your project turns them off
//! completely, there is still a way of reducing the error handling to a minimum
//! by using a standard setjmp/longjmp approach. AsmJit is exception-safe and
//! cleans up everything before calling the ErrorHandler, so any approach is
//! safe. You can simply jump from the error handler without causing any
//! side-effects or memory leaks. The following example demonstrates how it
//! could be done:
//! If C++ exceptions are not what you like, or your project turns them off completely, there is still a way of
//! reducing the error handling to a minimum by using a standard setjmp/longjmp approach. AsmJit is exception-safe and
//! cleans up everything before calling the ErrorHandler, so any approach is safe. You can simply jump from the error
//! handler without causing any side-effects or memory leaks. The following example demonstrates how it could be done:
//!
//! ```
//! // Error Handling #3 - Using setjmp/longjmp if exceptions are not allowed.
@@ -223,40 +187,37 @@ class ASMJIT_VIRTAPI ErrorHandler {
public:
ASMJIT_BASE_CLASS(ErrorHandler)
// --------------------------------------------------------------------------
// [Construction / Destruction]
// --------------------------------------------------------------------------
//! \name Construction & Destruction
//! \{
//! Creates a new `ErrorHandler` instance.
ASMJIT_API ErrorHandler() noexcept;
//! Destroys the `ErrorHandler` instance.
ASMJIT_API virtual ~ErrorHandler() noexcept;
// --------------------------------------------------------------------------
// [Handle Error]
// --------------------------------------------------------------------------
//! \}
//! \name Interface
//! \{
//! Error handler (must be reimplemented).
//!
//! The error handler is called after an error happens and before it's propagated
//! to the caller. There are multiple ways the error handler can be used:
//! The error handler is called after an error happens and before it's propagated to the caller. There are multiple
//! ways the error handler can be used:
//!
//! 1. User-based error handling without throwing an exception or using C's
//! `longjmp()`. This is for users that don't use exceptions and want
//! customized error handling.
//! 1. User-based error handling without throwing an exception or using C's `longjmp()`. This is for users that don't
//! use exceptions and want customized error handling.
//!
//! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely
//! exception-safe, but you can throw an exception from your error handler if
//! that is the preferred way of handling errors in your project.
//! 2. Throwing an exception. AsmJit doesn't use exceptions and is completely exception-safe, but you can throw
//! an exception from your error handler if that is the preferred way of handling errors in your project.
//!
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts
//! `BaseEmitter` to a consistent state before calling `handleError()`
//! so `longjmp()` can be used without any issues to cancel the code
//! generation if an error occurred. There is no difference between
//! exceptions and `longjmp()` from AsmJit's perspective, however,
//! never jump outside of `CodeHolder` and `BaseEmitter` scope as you
//! would leak memory.
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts `BaseEmitter` to a consistent state before
//! calling `handleError()` so `longjmp()` can be used without any issues to cancel the code generation if an
//! error occurred. There is no difference between exceptions and `longjmp()` from AsmJit's perspective, however,
//! never jump outside of `CodeHolder` and `BaseEmitter` scope as you would leak memory.
virtual void handleError(Error err, const char* message, BaseEmitter* origin) = 0;
//! \}
};
//! \}
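A minimal sketch of the "record and continue" use-case described above; attaching the handler through CodeHolder::setErrorHandler() follows the examples elided from this hunk, and the class name is illustrative:

```
#include <asmjit/core.h>
#include <cstdio>

// Logs the error and lets code generation continue.
class PrintingErrorHandler : public asmjit::ErrorHandler {
public:
  void handleError(asmjit::Error err, const char* message,
                   asmjit::BaseEmitter* origin) override {
    (void)origin;
    std::fprintf(stderr, "AsmJit error %u: %s\n", unsigned(err), message);
  }
};

int main() {
  PrintingErrorHandler eh;
  asmjit::CodeHolder code;
  code.init(asmjit::Environment::host());
  code.setErrorHandler(&eh);  // inherited by emitters attached to this CodeHolder
  return 0;
}
```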

View File

@@ -1,186 +0,0 @@
// AsmJit - Machine code generation for C++
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_FEATURES_H_INCLUDED
#define ASMJIT_CORE_FEATURES_H_INCLUDED
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::BaseFeatures]
// ============================================================================
//! Base class that provides information about CPU features.
//!
//! Internally each feature is represented by a single bit in an embedded
//! bit-array; however, feature bits are defined by architecture-specific
//! implementations, like \ref x86::Features.
class BaseFeatures {
public:
typedef Support::BitWord BitWord;
typedef Support::BitVectorIterator<BitWord> Iterator;
enum : uint32_t {
kMaxFeatures = 256,
kNumBitWords = kMaxFeatures / Support::kBitWordSizeInBits
};
BitWord _bits[kNumBitWords];
//! \name Construction & Destruction
//! \{
inline BaseFeatures() noexcept { reset(); }
inline BaseFeatures(const BaseFeatures& other) noexcept = default;
inline explicit BaseFeatures(Globals::NoInit_) noexcept {}
inline void reset() noexcept {
for (size_t i = 0; i < kNumBitWords; i++)
_bits[i] = 0;
}
//! \}
//! \name Overloaded Operators
//! \{
inline BaseFeatures& operator=(const BaseFeatures& other) noexcept = default;
inline bool operator==(const BaseFeatures& other) noexcept { return eq(other); }
inline bool operator!=(const BaseFeatures& other) noexcept { return !eq(other); }
//! \}
//! \name Cast
//! \{
//! Casts this base class into a derived type `T`.
template<typename T>
inline T& as() noexcept { return static_cast<T&>(*this); }
//! Casts this base class into a derived type `T` (const).
template<typename T>
inline const T& as() const noexcept { return static_cast<const T&>(*this); }
//! \}
//! \name Accessors
//! \{
inline bool empty() const noexcept {
for (uint32_t i = 0; i < kNumBitWords; i++)
if (_bits[i])
return false;
return true;
}
//! Returns all features as array of bitwords (see \ref Support::BitWord).
inline BitWord* bits() noexcept { return _bits; }
//! Returns all features as array of bitwords (const).
inline const BitWord* bits() const noexcept { return _bits; }
//! Returns the number of BitWords returned by \ref bits().
inline size_t bitWordCount() const noexcept { return kNumBitWords; }
//! Returns \ref Support::BitVectorIterator, which can be used to iterate over
//! all features efficiently.
inline Iterator iterator() const noexcept {
return Iterator(_bits, kNumBitWords);
}
//! Tests whether the feature `featureId` is present.
inline bool has(uint32_t featureId) const noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
return bool((_bits[idx] >> bit) & 0x1);
}
//! Tests whether all features as defined by `other` are present.
inline bool hasAll(const BaseFeatures& other) const noexcept {
for (uint32_t i = 0; i < kNumBitWords; i++)
if ((_bits[i] & other._bits[i]) != other._bits[i])
return false;
return true;
}
//! \}
//! \name Utilities
//! \{
//! Adds the given CPU `featureId` to the list of features.
inline void add(uint32_t featureId) noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
_bits[idx] |= BitWord(1) << bit;
}
template<typename... Args>
inline void add(uint32_t featureId, Args... otherIds) noexcept {
add(featureId);
add(otherIds...);
}
//! Removes the given CPU `featureId` from the list of features.
inline void remove(uint32_t featureId) noexcept {
ASMJIT_ASSERT(featureId < kMaxFeatures);
uint32_t idx = featureId / Support::kBitWordSizeInBits;
uint32_t bit = featureId % Support::kBitWordSizeInBits;
_bits[idx] &= ~(BitWord(1) << bit);
}
template<typename... Args>
inline void remove(uint32_t featureId, Args... otherIds) noexcept {
remove(featureId);
remove(otherIds...);
}
inline bool eq(const BaseFeatures& other) const noexcept {
for (size_t i = 0; i < kNumBitWords; i++)
if (_bits[i] != other._bits[i])
return false;
return true;
}
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FEATURES_H_INCLUDED
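This header is removed by the commit, but the bit-array behaviour it documents is easy to see in a short sketch against the pre-refactor headers; the feature id is an arbitrary illustrative value, not a real CPU feature constant:

```
#include <asmjit/core.h>
#include <cassert>
#include <cstdint>

int main() {
  asmjit::BaseFeatures features;

  // Any id below kMaxFeatures maps to bit (id % bit-word size)
  // of word (id / bit-word size) in the embedded bit-array.
  constexpr uint32_t kSomeFeatureId = 42;  // hypothetical id

  assert(features.empty());
  features.add(kSomeFeatureId);
  assert(features.has(kSomeFeatureId));

  features.remove(kSomeFeatureId);
  assert(!features.has(kSomeFeatureId));
  return 0;
}
```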

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
@@ -29,7 +11,7 @@
#include "../core/codeholder.h"
#include "../core/compiler.h"
#include "../core/emitter.h"
#include "../core/formatter.h"
#include "../core/formatter_p.h"
#include "../core/string.h"
#include "../core/support.h"
#include "../core/type.h"
@@ -48,10 +30,6 @@ ASMJIT_BEGIN_NAMESPACE
class VirtReg;
#endif
// ============================================================================
// [asmjit::Formatter]
// ============================================================================
namespace Formatter {
static const char wordNameTable[][8] = {
@@ -72,40 +50,44 @@ static const char wordNameTable[][8] = {
};
Error formatTypeId(String& sb, uint32_t typeId) noexcept {
if (typeId == Type::kIdVoid)
Error formatTypeId(String& sb, TypeId typeId) noexcept {
if (typeId == TypeId::kVoid)
return sb.append("void");
if (!Type::isValid(typeId))
if (!TypeUtils::isValid(typeId))
return sb.append("unknown");
const char* typeName = "unknown";
uint32_t typeSize = Type::sizeOf(typeId);
uint32_t typeSize = TypeUtils::sizeOf(typeId);
TypeId scalarType = TypeUtils::scalarOf(typeId);
uint32_t baseId = Type::baseOf(typeId);
switch (baseId) {
case Type::kIdIntPtr : typeName = "iptr" ; break;
case Type::kIdUIntPtr: typeName = "uptr" ; break;
case Type::kIdI8 : typeName = "i8" ; break;
case Type::kIdU8 : typeName = "u8" ; break;
case Type::kIdI16 : typeName = "i16" ; break;
case Type::kIdU16 : typeName = "u16" ; break;
case Type::kIdI32 : typeName = "i32" ; break;
case Type::kIdU32 : typeName = "u32" ; break;
case Type::kIdI64 : typeName = "i64" ; break;
case Type::kIdU64 : typeName = "u64" ; break;
case Type::kIdF32 : typeName = "f32" ; break;
case Type::kIdF64 : typeName = "f64" ; break;
case Type::kIdF80 : typeName = "f80" ; break;
case Type::kIdMask8 : typeName = "mask8" ; break;
case Type::kIdMask16 : typeName = "mask16"; break;
case Type::kIdMask32 : typeName = "mask32"; break;
case Type::kIdMask64 : typeName = "mask64"; break;
case Type::kIdMmx32 : typeName = "mmx32" ; break;
case Type::kIdMmx64 : typeName = "mmx64" ; break;
switch (scalarType) {
case TypeId::kIntPtr : typeName = "intptr" ; break;
case TypeId::kUIntPtr: typeName = "uintptr"; break;
case TypeId::kInt8 : typeName = "int8" ; break;
case TypeId::kUInt8 : typeName = "uint8" ; break;
case TypeId::kInt16 : typeName = "int16" ; break;
case TypeId::kUInt16 : typeName = "uint16" ; break;
case TypeId::kInt32 : typeName = "int32" ; break;
case TypeId::kUInt32 : typeName = "uint32" ; break;
case TypeId::kInt64 : typeName = "int64" ; break;
case TypeId::kUInt64 : typeName = "uint64" ; break;
case TypeId::kFloat32: typeName = "float32"; break;
case TypeId::kFloat64: typeName = "float64"; break;
case TypeId::kFloat80: typeName = "float80"; break;
case TypeId::kMask8 : typeName = "mask8" ; break;
case TypeId::kMask16 : typeName = "mask16" ; break;
case TypeId::kMask32 : typeName = "mask32" ; break;
case TypeId::kMask64 : typeName = "mask64" ; break;
case TypeId::kMmx32 : typeName = "mmx32" ; break;
case TypeId::kMmx64 : typeName = "mmx64" ; break;
default:
typeName = "unknown";
break;
}
uint32_t baseSize = Type::sizeOf(baseId);
uint32_t baseSize = TypeUtils::sizeOf(scalarType);
if (typeSize > baseSize) {
uint32_t count = typeSize / baseSize;
return sb.appendFormat("%sx%u", typeName, unsigned(count));
@@ -117,7 +99,7 @@ Error formatTypeId(String& sb, uint32_t typeId) noexcept {
Error formatFeature(
String& sb,
uint32_t arch,
Arch arch,
uint32_t featureId) noexcept {
#if !defined(ASMJIT_NO_X86)
@@ -135,7 +117,7 @@ Error formatFeature(
Error formatLabel(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept {
@@ -159,6 +141,9 @@ Error formatLabel(
ASMJIT_PROPAGATE(sb.append('.'));
}
if (le->type() == LabelType::kAnonymous)
ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId));
return sb.append(le->name());
}
else {
@@ -168,10 +153,10 @@ Error formatLabel(
Error formatRegister(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
uint32_t regType,
Arch arch,
RegType regType,
uint32_t regId) noexcept {
#if !defined(ASMJIT_NO_X86)
@@ -189,9 +174,9 @@ Error formatRegister(
Error formatOperand(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
Arch arch,
const Operand_& op) noexcept {
#if !defined(ASMJIT_NO_X86)
@@ -209,21 +194,21 @@ Error formatOperand(
ASMJIT_API Error formatDataType(
String& sb,
uint32_t formatFlags,
uint32_t arch,
uint32_t typeId) noexcept
FormatFlags formatFlags,
Arch arch,
TypeId typeId) noexcept
{
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(arch >= Environment::kArchCount))
if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue)))
return DebugUtils::errored(kErrorInvalidArch);
uint32_t typeSize = Type::sizeOf(typeId);
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0 || typeSize > 8)
return DebugUtils::errored(kErrorInvalidState);
uint32_t typeSizeLog2 = Support::ctz(typeSize);
return sb.append(wordNameTable[size_t(_archTraits[arch].isaWordNameId(typeSizeLog2))]);
return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]);
}
static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSize, const uint8_t* data, size_t itemCount) noexcept {
@@ -232,7 +217,7 @@ static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSiz
sb.append(' ');
for (size_t i = 0; i < itemCount; i++) {
uint64_t v;
uint64_t v = 0;
if (i != 0)
ASMJIT_PROPAGATE(sb.append(", ", 2));
@@ -244,7 +229,7 @@ static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSiz
case 8: v = Support::readU64u(data); break;
}
ASMJIT_PROPAGATE(sb.appendUInt(v, 16, typeSize * 2, String::kFormatAlternate));
ASMJIT_PROPAGATE(sb.appendUInt(v, 16, typeSize * 2, StringFormatFlags::kAlternate));
data += typeSize;
}
@@ -253,16 +238,16 @@ static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSiz
Error formatData(
String& sb,
uint32_t formatFlags,
uint32_t arch,
uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount) noexcept
FormatFlags formatFlags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) noexcept
{
DebugUtils::unused(formatFlags);
if (ASMJIT_UNLIKELY(arch >= Environment::kArchCount))
if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch)))
return DebugUtils::errored(kErrorInvalidArch);
uint32_t typeSize = Type::sizeOf(typeId);
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0)
return DebugUtils::errored(kErrorInvalidState);
@@ -277,7 +262,7 @@ Error formatData(
}
uint32_t typeSizeLog2 = Support::ctz(typeSize);
const char* wordName = wordNameTable[size_t(_archTraits[arch].isaWordNameId(typeSizeLog2))];
const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))];
if (repeatCount > 1)
ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount));
@@ -287,9 +272,9 @@ Error formatData(
Error formatInstruction(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
#if !defined(ASMJIT_NO_X86)
@@ -308,8 +293,8 @@ Error formatInstruction(
#ifndef ASMJIT_NO_BUILDER
#ifndef ASMJIT_NO_COMPILER
static Error formatFuncValue(String& sb, uint32_t formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
uint32_t typeId = value.typeId();
static Error formatFuncValue(String& sb, FormatFlags formatFlags, const BaseEmitter* emitter, FuncValue value) noexcept {
TypeId typeId = value.typeId();
ASMJIT_PROPAGATE(formatTypeId(sb, typeId));
if (value.isAssigned()) {
@@ -338,10 +323,10 @@ static Error formatFuncValue(String& sb, uint32_t formatFlags, const BaseEmitter
static Error formatFuncValuePack(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncValuePack& pack,
VirtReg* const* vRegs) noexcept {
const RegOnly* vRegs) noexcept {
size_t count = pack.count();
if (!count)
@@ -358,11 +343,16 @@ static Error formatFuncValuePack(
if (valueIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, emitter, value));
ASMJIT_PROPAGATE(formatFuncValue(sb, formatFlags, cc, value));
if (vRegs) {
static const char nullRet[] = "<none>";
ASMJIT_PROPAGATE(sb.appendFormat(" %s", vRegs[valueIndex] ? vRegs[valueIndex]->name() : nullRet));
const VirtReg* virtReg = nullptr;
static const char nullReg[] = "<none>";
if (vRegs[valueIndex].isReg() && cc->isVirtIdValid(vRegs[valueIndex].id()))
virtReg = cc->virtRegById(vRegs[valueIndex].id());
ASMJIT_PROPAGATE(sb.appendFormat(" %s", virtReg ? virtReg->name() : nullReg));
}
}
@@ -374,17 +364,17 @@ static Error formatFuncValuePack(
static Error formatFuncRets(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncDetail& fd) noexcept {
return formatFuncValuePack(sb, formatFlags, emitter, fd.retPack(), nullptr);
return formatFuncValuePack(sb, formatFlags, cc, fd.retPack(), nullptr);
}
static Error formatFuncArgs(
String& sb,
uint32_t formatFlags,
const BaseEmitter* emitter,
FormatFlags formatFlags,
const BaseCompiler* cc,
const FuncDetail& fd,
const FuncNode::ArgPack* argPacks) noexcept {
@@ -396,7 +386,7 @@ static Error formatFuncArgs(
if (argIndex)
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, emitter, fd.argPack(argIndex), argPacks[argIndex]._data));
ASMJIT_PROPAGATE(formatFuncValuePack(sb, formatFlags, cc, fd.argPack(argIndex), argPacks[argIndex]._data));
}
return kErrorOk;
@@ -405,25 +395,26 @@ static Error formatFuncArgs(
Error formatNode(
String& sb,
uint32_t formatFlags,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* node) noexcept {
if (node->hasPosition() && (formatFlags & FormatOptions::kFlagPositions) != 0)
if (node->hasPosition() && formatOptions.hasFlag(FormatFlags::kPositions))
ASMJIT_PROPAGATE(sb.appendFormat("<%05u> ", node->position()));
size_t startLineIndex = sb.size();
switch (node->type()) {
case BaseNode::kNodeInst:
case BaseNode::kNodeJump: {
case NodeType::kInst:
case NodeType::kJump: {
const InstNode* instNode = node->as<InstNode>();
ASMJIT_PROPAGATE(
formatInstruction(sb, formatFlags, builder,
builder->arch(),
instNode->baseInst(), instNode->operands(), instNode->opCount()));
ASMJIT_PROPAGATE(formatInstruction(sb, formatOptions.flags(), builder,
builder->arch(),
instNode->baseInst(), instNode->operands(), instNode->opCount()));
break;
}
case BaseNode::kNodeSection: {
case NodeType::kSection: {
const SectionNode* sectionNode = node->as<SectionNode>();
if (builder->_code->isSectionValid(sectionNode->id())) {
const Section* section = builder->_code->sectionById(sectionNode->id());
@@ -432,65 +423,64 @@ Error formatNode(
break;
}
case BaseNode::kNodeLabel: {
case NodeType::kLabel: {
const LabelNode* labelNode = node->as<LabelNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, labelNode->labelId()));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, labelNode->labelId()));
ASMJIT_PROPAGATE(sb.append(":"));
break;
}
case BaseNode::kNodeAlign: {
case NodeType::kAlign: {
const AlignNode* alignNode = node->as<AlignNode>();
ASMJIT_PROPAGATE(
sb.appendFormat(".align %u (%s)",
alignNode->alignment(),
alignNode->alignMode() == kAlignCode ? "code" : "data"));
ASMJIT_PROPAGATE(sb.appendFormat(".align %u (%s)",
alignNode->alignment(),
alignNode->alignMode() == AlignMode::kCode ? "code" : "data"));
break;
}
case BaseNode::kNodeEmbedData: {
case NodeType::kEmbedData: {
const EmbedDataNode* embedNode = node->as<EmbedDataNode>();
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(formatDataType(sb, formatFlags, builder->arch(), embedNode->typeId()));
ASMJIT_PROPAGATE(formatDataType(sb, formatOptions.flags(), builder->arch(), embedNode->typeId()));
ASMJIT_PROPAGATE(sb.appendFormat(" {Count=%zu Repeat=%zu TotalSize=%zu}", embedNode->itemCount(), embedNode->repeatCount(), embedNode->dataSize()));
break;
}
case BaseNode::kNodeEmbedLabel: {
case NodeType::kEmbedLabel: {
const EmbedLabelNode* embedNode = node->as<EmbedLabelNode>();
ASMJIT_PROPAGATE(sb.append(".label "));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->labelId()));
break;
}
case BaseNode::kNodeEmbedLabelDelta: {
case NodeType::kEmbedLabelDelta: {
const EmbedLabelDeltaNode* embedNode = node->as<EmbedLabelDeltaNode>();
ASMJIT_PROPAGATE(sb.append(".label ("));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->labelId()));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->labelId()));
ASMJIT_PROPAGATE(sb.append(" - "));
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, embedNode->baseLabelId()));
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, embedNode->baseLabelId()));
ASMJIT_PROPAGATE(sb.append(")"));
break;
}
case BaseNode::kNodeConstPool: {
case NodeType::kConstPool: {
const ConstPoolNode* constPoolNode = node->as<ConstPoolNode>();
ASMJIT_PROPAGATE(sb.appendFormat("[ConstPool Size=%zu Alignment=%zu]", constPoolNode->size(), constPoolNode->alignment()));
break;
};
case BaseNode::kNodeComment: {
case NodeType::kComment: {
const CommentNode* commentNode = node->as<CommentNode>();
ASMJIT_PROPAGATE(sb.appendFormat("; %s", commentNode->inlineComment()));
break;
}
case BaseNode::kNodeSentinel: {
case NodeType::kSentinel: {
const SentinelNode* sentinelNode = node->as<SentinelNode>();
const char* sentinelName = nullptr;
switch (sentinelNode->sentinelType()) {
case SentinelNode::kSentinelFuncEnd:
case SentinelType::kFuncEnd:
sentinelName = "[FuncEnd]";
break;
@@ -504,20 +494,22 @@ Error formatNode(
}
#ifndef ASMJIT_NO_COMPILER
case BaseNode::kNodeFunc: {
case NodeType::kFunc: {
const FuncNode* funcNode = node->as<FuncNode>();
ASMJIT_PROPAGATE(formatLabel(sb, formatFlags, builder, funcNode->labelId()));
ASMJIT_PROPAGATE(sb.append(": "));
if (builder->isCompiler()) {
ASMJIT_PROPAGATE(formatLabel(sb, formatOptions.flags(), builder, funcNode->labelId()));
ASMJIT_PROPAGATE(sb.append(": "));
ASMJIT_PROPAGATE(formatFuncRets(sb, formatFlags, builder, funcNode->detail()));
ASMJIT_PROPAGATE(sb.append(" Func("));
ASMJIT_PROPAGATE(formatFuncArgs(sb, formatFlags, builder, funcNode->detail(), funcNode->argPacks()));
ASMJIT_PROPAGATE(sb.append(")"));
ASMJIT_PROPAGATE(formatFuncRets(sb, formatOptions.flags(), static_cast<const BaseCompiler*>(builder), funcNode->detail()));
ASMJIT_PROPAGATE(sb.append(" Func("));
ASMJIT_PROPAGATE(formatFuncArgs(sb, formatOptions.flags(), static_cast<const BaseCompiler*>(builder), funcNode->detail(), funcNode->argPacks()));
ASMJIT_PROPAGATE(sb.append(")"));
}
break;
}
case BaseNode::kNodeFuncRet: {
case NodeType::kFuncRet: {
const FuncRetNode* retNode = node->as<FuncRetNode>();
ASMJIT_PROPAGATE(sb.append("[FuncRet]"));
@@ -525,18 +517,17 @@ Error formatNode(
const Operand_& op = retNode->_opArray[i];
if (!op.isNone()) {
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, builder, builder->arch(), op));
ASMJIT_PROPAGATE(formatOperand(sb, formatOptions.flags(), builder, builder->arch(), op));
}
}
break;
}
case BaseNode::kNodeInvoke: {
case NodeType::kInvoke: {
const InvokeNode* invokeNode = node->as<InvokeNode>();
ASMJIT_PROPAGATE(
formatInstruction(sb, formatFlags, builder,
builder->arch(),
invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
ASMJIT_PROPAGATE(formatInstruction(sb, formatOptions.flags(), builder,
builder->arch(),
invokeNode->baseInst(), invokeNode->operands(), invokeNode->opCount()));
break;
}
#endif
@@ -547,28 +538,38 @@ Error formatNode(
}
}
if (node->hasInlineComment()) {
size_t requiredPadding = paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
size_t currentPadding = sb.size() - startLineIndex;
if (currentPadding < requiredPadding)
ASMJIT_PROPAGATE(sb.appendChars(' ', requiredPadding - currentPadding));
ASMJIT_PROPAGATE(sb.append("; "));
ASMJIT_PROPAGATE(sb.append(node->inlineComment()));
}
return kErrorOk;
}
Error formatNodeList(
String& sb,
uint32_t formatFlags,
const FormatOptions& formatOptions,
const BaseBuilder* builder) noexcept {
return formatNodeList(sb, formatFlags, builder, builder->firstNode(), nullptr);
return formatNodeList(sb, formatOptions, builder, builder->firstNode(), nullptr);
}
Error formatNodeList(
String& sb,
uint32_t formatFlags,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept {
const BaseNode* node = begin;
while (node != end) {
ASMJIT_PROPAGATE(formatNode(sb, formatFlags, builder, node));
ASMJIT_PROPAGATE(formatNode(sb, formatOptions, builder, node));
ASMJIT_PROPAGATE(sb.append('\n'));
node = node->next();
}

View File

@@ -1,126 +1,98 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FORMATTER_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_H_INCLUDED
#include "../core/globals.h"
#include "../core/inst.h"
#include "../core/string.h"
#ifndef ASMJIT_NO_LOGGING
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
// ============================================================================
// [Forward Declarations]
// ============================================================================
class BaseBuilder;
class BaseEmitter;
class BaseNode;
struct Operand_;
#ifndef ASMJIT_NO_BUILDER
class BaseBuilder;
class BaseNode;
#endif
//! Format flags used by \ref Logger and \ref FormatOptions.
enum class FormatFlags : uint32_t {
//! No formatting flags.
kNone = 0u,
#ifndef ASMJIT_NO_COMPILER
class BaseCompiler;
#endif
//! Show also binary form of each logged instruction (Assembler).
kMachineCode = 0x00000001u,
//! Show a text explanation of some immediate values.
kExplainImms = 0x00000002u,
//! Use hexadecimal notation of immediate values.
kHexImms = 0x00000004u,
//! Use hexadecimal notation of addresses and offsets in addresses.
kHexOffsets = 0x00000008u,
//! Show casts between virtual register types (Compiler output).
kRegCasts = 0x00000010u,
//! Show positions associated with nodes (Compiler output).
kPositions = 0x00000020u
};
ASMJIT_DEFINE_ENUM_FLAGS(FormatFlags)
// ============================================================================
// [asmjit::FormatOptions]
// ============================================================================
//! Format indentation group, used by \ref FormatOptions.
enum class FormatIndentationGroup : uint32_t {
//! Indentation used for instructions and directives.
kCode = 0u,
//! Indentation used for labels and function nodes.
kLabel = 1u,
//! Indentation used for comments (not inline comments).
kComment = 2u,
//! \cond INTERNAL
//! Reserved for future use.
kReserved = 3u,
//! \endcond
//! Maximum value of `FormatIndentationGroup`.
kMaxValue = kReserved
};
//! Format padding group, used by \ref FormatOptions.
enum class FormatPaddingGroup : uint32_t {
//! Describes padding of a regular line, which can represent instruction, data, or assembler directives.
kRegularLine = 0,
//! Describes padding of machine code dump that is visible next to the instruction, if enabled.
kMachineCode = 1,
//! Maximum value of `FormatPaddingGroup`.
kMaxValue = kMachineCode
};
//! Formatting options used by \ref Logger and \ref Formatter.
class FormatOptions {
public:
//! Format flags, see \ref Flags.
uint32_t _flags;
//! Indentation by type, see \ref IndentationType.
uint8_t _indentation[4];
//! Flags can enable a logging feature.
enum Flags : uint32_t {
//! No flags.
kNoFlags = 0u,
//! Show also binary form of each logged instruction (Assembler).
kFlagMachineCode = 0x00000001u,
//! Show a text explanation of some immediate values.
kFlagExplainImms = 0x00000002u,
//! Use hexadecimal notation of immediate values.
kFlagHexImms = 0x00000004u,
//! Use hexadecimal notation of address offsets.
kFlagHexOffsets = 0x00000008u,
//! Show casts between virtual register types (Compiler).
kFlagRegCasts = 0x00000010u,
//! Show positions associated with nodes (Compiler).
kFlagPositions = 0x00000020u,
//! Annotate nodes that are lowered by passes.
kFlagAnnotations = 0x00000040u,
// TODO: These must go, keep this only for formatting.
//! Show an additional output from passes.
kFlagDebugPasses = 0x00000080u,
//! Show an additional output from RA.
kFlagDebugRA = 0x00000100u
};
//! Describes indentation type of code, label, or comment in logger output.
enum IndentationType : uint32_t {
//! Indentation used for instructions and directives.
kIndentationCode = 0u,
//! Indentation used for labels and function nodes.
kIndentationLabel = 1u,
//! Indentation used for comments (not inline comments).
kIndentationComment = 2u,
//! \cond INTERNAL
//! Reserved for future use.
kIndentationReserved = 3u
//! \endcond
};
//! \name Construction & Destruction
//! \name Members
//! \{
//! Creates a default-initialized FormatOptions.
constexpr FormatOptions() noexcept
: _flags(0),
_indentation { 0, 0, 0, 0 } {}
//! Format flags.
FormatFlags _flags = FormatFlags::kNone;
//! Indentations for each indentation group.
Support::Array<uint8_t, uint32_t(FormatIndentationGroup::kMaxValue) + 1> _indentation {};
//! Paddings for each padding group.
Support::Array<uint16_t, uint32_t(FormatPaddingGroup::kMaxValue) + 1> _padding {};
constexpr FormatOptions(const FormatOptions& other) noexcept = default;
inline FormatOptions& operator=(const FormatOptions& other) noexcept = default;
//! \}
//! \name Reset
//! \{
//! Resets FormatOptions to its default initialized state.
inline void reset() noexcept {
_flags = 0;
_indentation[0] = 0;
_indentation[1] = 0;
_indentation[2] = 0;
_indentation[3] = 0;
_flags = FormatFlags::kNone;
_indentation.fill(uint8_t(0));
_padding.fill(uint16_t(0));
}
//! \}
@@ -129,104 +101,109 @@ public:
//! \{
//! Returns format flags.
constexpr uint32_t flags() const noexcept { return _flags; }
inline FormatFlags flags() const noexcept { return _flags; }
//! Tests whether the given `flag` is set in format flags.
constexpr bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
//! Resets all format flags to `flags`.
inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags.
inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags.
inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
inline bool hasFlag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Returns indentation for the given `type`, see \ref IndentationType.
constexpr uint8_t indentation(uint32_t type) const noexcept { return _indentation[type]; }
//! Sets indentation for the given `type`, see \ref IndentationType.
inline void setIndentation(uint32_t type, uint32_t n) noexcept { _indentation[type] = uint8_t(n); }
//! Resets indentation for the given `type` to zero.
inline void resetIndentation(uint32_t type) noexcept { _indentation[type] = uint8_t(0); }
//! Resets all format flags to `flags`.
inline void setFlags(FormatFlags flags) noexcept { _flags = flags; }
//! Adds `flags` to format flags.
inline void addFlags(FormatFlags flags) noexcept { _flags |= flags; }
//! Removes `flags` from format flags.
inline void clearFlags(FormatFlags flags) noexcept { _flags &= ~flags; }
//! Returns indentation for the given indentation `group`.
inline uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; }
//! Sets indentation for the given indentation `group`.
inline void setIndentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); }
//! Resets indentation for the given indentation `group` to zero.
inline void resetIndentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); }
//! Returns padding for the given padding `group`.
inline size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; }
//! Sets padding for the given padding `group`.
inline void setPadding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); }
//! Resets padding for the given padding `group` to zero, which means that a default padding will be used
//! based on the target architecture properties.
inline void resetPadding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); }
//! \}
};
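For orientation, a minimal sketch of driving the strong-typed options above (illustrative snippet; only members declared in this class are used, the chosen values are arbitrary):

FormatOptions options;
options.addFlags(FormatFlags::kMachineCode | FormatFlags::kExplainImms); // flags compose via ASMJIT_DEFINE_ENUM_FLAGS
options.setIndentation(FormatIndentationGroup::kCode, 2);                // indent instructions/directives by 2 spaces
options.setPadding(FormatPaddingGroup::kRegularLine, 48);                // column reserved before inline comments
options.reset();                                                         // back to defaults (no flags, zero indent/padding)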
// ============================================================================
// [asmjit::Formatter]
// ============================================================================
//! Provides formatting functionality to format operands, instructions, and nodes.
namespace Formatter {
#ifndef ASMJIT_NO_LOGGING
//! Appends a formatted `typeId` to the output string `sb`.
ASMJIT_API Error formatTypeId(
String& sb,
uint32_t typeId) noexcept;
TypeId typeId) noexcept;
//! Appends a formatted `featureId` to the output string `sb`.
//!
//! See \ref BaseFeatures.
//! See \ref CpuFeatures.
ASMJIT_API Error formatFeature(
String& sb,
uint32_t arch,
Arch arch,
uint32_t featureId) noexcept;
//! Appends a formatted register to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format virtual registers,
//! which won't be formatted properly if the `emitter` is not provided.
//! \note Emitter is optional, but it's required to format virtual registers, which won't be formatted properly
//! if the `emitter` is not provided.
ASMJIT_API Error formatRegister(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
uint32_t regType,
Arch arch,
RegType regType,
uint32_t regId) noexcept;
//! Appends a formatted label to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels
//! properly, otherwise the formatted as it is an anonymous label.
//! \note Emitter is optional, but it's required to format named labels properly, otherwise the label is
//! formatted as an anonymous label.
ASMJIT_API Error formatLabel(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept;
//! Appends a formatted operand to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and
//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
//! more details.
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref formatRegister() and \ref formatLabel() for more details.
ASMJIT_API Error formatOperand(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
Arch arch,
const Operand_& op) noexcept;
//! Appends a formatted data-type to the output string `sb`.
ASMJIT_API Error formatDataType(
String& sb,
uint32_t formatFlags,
uint32_t arch,
uint32_t typeId) noexcept;
FormatFlags formatFlags,
Arch arch,
TypeId typeId) noexcept;
//! Appends a formatted data to the output string `sb`.
ASMJIT_API Error formatData(
String& sb,
uint32_t formatFlags,
uint32_t arch,
uint32_t typeId, const void* data, size_t itemCount, size_t repeatCount = 1) noexcept;
FormatFlags formatFlags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) noexcept;
//! Appends a formatted instruction to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and
//! virtual registers. See \ref formatRegister() and \ref formatLabel() for
//! more details.
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref formatRegister() and \ref formatLabel() for more details.
ASMJIT_API Error formatInstruction(
String& sb,
uint32_t formatFlags,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t arch,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
#ifndef ASMJIT_NO_BUILDER
@@ -235,7 +212,7 @@ ASMJIT_API Error formatInstruction(
//! The `node` must belong to the provided `builder`.
ASMJIT_API Error formatNode(
String& sb,
uint32_t formatFlags,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* node) noexcept;
@@ -244,27 +221,27 @@ ASMJIT_API Error formatNode(
//! All nodes that are part of the given `builder` will be appended.
ASMJIT_API Error formatNodeList(
String& sb,
uint32_t formatFlags,
const FormatOptions& formatOptions,
const BaseBuilder* builder) noexcept;
//! Appends formatted nodes to the output string `sb`.
//!
//! This function works the same as \ref formatNode(), but appends more nodes
//! to the output string, separating each node with a newline '\n' character.
//! This function works the same as \ref formatNode(), but appends more nodes to the output string,
//! separating each node with a newline '\n' character.
ASMJIT_API Error formatNodeList(
String& sb,
uint32_t formatFlags,
const FormatOptions& formatOptions,
const BaseBuilder* builder,
const BaseNode* begin,
const BaseNode* end) noexcept;
#endif
#endif
} // {Formatter}
//! \}
ASMJIT_END_NAMESPACE
#endif
#endif // ASMJIT_CORE_FORMATTER_H_INCLUDED
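A minimal sketch of using the node-list formatter declared above (assumes the x86 backend, JIT support, and logging are compiled in; the emitted program is only illustrative):

#include <asmjit/x86.h>
#include <stdio.h>
using namespace asmjit;

int main() {
  JitRuntime rt;
  CodeHolder code;
  code.init(rt.environment());

  x86::Builder builder(&code);
  builder.mov(x86::eax, 1);
  builder.ret();

  String sb;
  FormatOptions formatOptions;
  formatOptions.addFlags(FormatFlags::kMachineCode);
  Formatter::formatNodeList(sb, formatOptions, &builder); // dumps all nodes, one per line
  printf("%s", sb.data());
  return 0;
}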

View File

@@ -0,0 +1,34 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FORMATTER_P_H_INCLUDED
#define ASMJIT_CORE_FORMATTER_P_H_INCLUDED
#include "../core/formatter.h"
ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_logging
//! \{
namespace Formatter {
static ASMJIT_FORCE_INLINE size_t paddingFromOptions(const FormatOptions& formatOptions, FormatPaddingGroup group) noexcept {
static constexpr uint16_t _defaultPaddingTable[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 };
static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here");
size_t padding = formatOptions.padding(group);
return padding ? padding : size_t(_defaultPaddingTable[uint32_t(group)]);
}
} // {Formatter}
//! \}
//! \endcond
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_FORMATTER_P_H_INCLUDED
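A small illustration of the fallback behaviour (derived from the helper above; a default-constructed FormatOptions reports zero padding, so the built-in defaults apply until overridden):

FormatOptions opts;
size_t a = Formatter::paddingFromOptions(opts, FormatPaddingGroup::kRegularLine); // 44 - default table entry
opts.setPadding(FormatPaddingGroup::kRegularLine, 52);
size_t b = Formatter::paddingFromOptions(opts, FormatPaddingGroup::kRegularLine); // 52 - user override wins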

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
@@ -38,11 +20,10 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::CallConv - Init / Reset]
// ============================================================================
// CallConv - Init & Reset
// =======================
ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId, const Environment& environment) noexcept {
ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId ccId, const Environment& environment) noexcept {
reset();
#if !defined(ASMJIT_NO_X86)
@@ -58,12 +39,11 @@ ASMJIT_FAVOR_SIZE Error CallConv::init(uint32_t ccId, const Environment& environ
return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncDetail - Init / Reset]
// ============================================================================
// FuncDetail - Init / Reset
// =========================
ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const Environment& environment) noexcept {
uint32_t ccId = signature.callConv();
CallConvId ccId = signature.callConvId();
uint32_t argCount = signature.argCount();
if (ASMJIT_UNLIKELY(argCount > Globals::kMaxFuncArgs))
@@ -73,19 +53,20 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
ASMJIT_PROPAGATE(cc.init(ccId, environment));
uint32_t registerSize = Environment::registerSizeFromArch(cc.arch());
uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize);
const uint8_t* signatureArgs = signature.args();
const TypeId* signatureArgs = signature.args();
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
FuncValuePack& argPack = _args[argIndex];
argPack[0].initTypeId(Type::deabstract(signatureArgs[argIndex], deabstractDelta));
argPack[0].initTypeId(TypeUtils::deabstract(signatureArgs[argIndex], deabstractDelta));
}
_argCount = uint8_t(argCount);
_vaIndex = uint8_t(signature.vaIndex());
uint32_t ret = signature.ret();
if (ret != Type::kIdVoid)
_rets[0].initTypeId(Type::deabstract(ret, deabstractDelta));
TypeId ret = signature.ret();
if (ret != TypeId::kVoid)
_rets[0].initTypeId(TypeUtils::deabstract(ret, deabstractDelta));
#if !defined(ASMJIT_NO_X86)
if (environment.isFamilyX86())
@@ -97,28 +78,26 @@ ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const E
return arm::FuncInternal::initFuncDetail(*this, signature, registerSize);
#endif
// We should never bubble here as if `cc.init()` succeeded then there has to
// be an implementation for the current architecture. However, stay safe.
// We should never bubble here as if `cc.init()` succeeded then there has to be an implementation for the current
// architecture. However, stay safe.
return DebugUtils::errored(kErrorInvalidArgument);
}
// ============================================================================
// [asmjit::FuncFrame - Init / Finalize]
// ============================================================================
// FuncFrame - Init
// ================
ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
uint32_t arch = func.callConv().arch();
Arch arch = func.callConv().arch();
if (!Environment::isValidArch(arch))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch);
// Initializing FuncFrame means making a copy of some properties of `func`.
// Properties like `_localStackSize` will be set by the user before the frame
// is finalized.
// Initializing FuncFrame means making a copy of some properties of `func`. Properties like `_localStackSize` will
// be set by the user before the frame is finalized.
reset();
_arch = uint8_t(arch);
_arch = arch;
_spRegId = uint8_t(archTraits.spRegId());
_saRegId = uint8_t(BaseReg::kIdBad);
@@ -134,34 +113,37 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
_spillZoneSize = uint8_t(func.spillZoneSize());
_finalStackAlignment = uint8_t(_naturalStackAlignment);
if (func.hasFlag(CallConv::kFlagCalleePopsStack)) {
if (func.hasFlag(CallConvFlags::kCalleePopsStack)) {
_calleeStackCleanup = uint16_t(func.argStackSize());
}
// Initial masks of dirty and preserved registers.
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
for (RegGroup group : RegGroupVirtValues{}) {
_dirtyRegs[group] = func.usedRegs(group);
_preservedRegs[group] = func.preservedRegs(group);
}
// Exclude stack pointer - this register is never included in saved GP regs.
_preservedRegs[BaseReg::kGroupGp] &= ~Support::bitMask(archTraits.spRegId());
_preservedRegs[RegGroup::kGp] &= ~Support::bitMask(archTraits.spRegId());
// The size and alignment of save/restore area of registers for each significant register group.
memcpy(_saveRestoreRegSize, func.callConv()._saveRestoreRegSize, sizeof(_saveRestoreRegSize));
memcpy(_saveRestoreAlignment, func.callConv()._saveRestoreAlignment, sizeof(_saveRestoreAlignment));
// The size and alignment of save/restore area of registers for each virtual register group
_saveRestoreRegSize = func.callConv()._saveRestoreRegSize;
_saveRestoreAlignment = func.callConv()._saveRestoreAlignment;
return kErrorOk;
}
// FuncFrame - Finalize
// ====================
ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (!Environment::isValidArch(arch()))
return DebugUtils::errored(kErrorInvalidArch);
const ArchTraits& archTraits = ArchTraits::byArch(arch());
uint32_t registerSize = _saveRestoreRegSize[BaseReg::kGroupGp];
uint32_t vectorSize = _saveRestoreRegSize[BaseReg::kGroupVec];
uint32_t registerSize = _saveRestoreRegSize[RegGroup::kGp];
uint32_t vectorSize = _saveRestoreRegSize[RegGroup::kVec];
uint32_t returnAddressSize = archTraits.hasLinkReg() ? 0u : registerSize;
// The final stack alignment must be updated accordingly to call and local stack alignments.
@@ -179,12 +161,12 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// Make frame pointer dirty if the function uses it.
if (hasFP) {
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(kFp);
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kFp);
// Currently required by ARM, if this works differently across architectures
// we would have to generalize most likely in CallConv.
// Currently required by ARM, if this works differently across architectures we would have to generalize most
// likely in CallConv.
if (kLr != BaseReg::kIdBad)
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(kLr);
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(kLr);
}
// These two are identical if the function doesn't align its stack dynamically.
@@ -192,22 +174,22 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
if (saRegId == BaseReg::kIdBad)
saRegId = kSp;
// Fix stack arguments base-register from SP to FP in case it was not picked
// before and the function performs dynamic stack alignment.
// Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs
// dynamic stack alignment.
if (hasDA && saRegId == kSp)
saRegId = kFp;
// Mark as dirty any register but SP if used as SA pointer.
if (saRegId != kSp)
_dirtyRegs[BaseReg::kGroupGp] |= Support::bitMask(saRegId);
_dirtyRegs[RegGroup::kGp] |= Support::bitMask(saRegId);
_spRegId = uint8_t(kSp);
_saRegId = uint8_t(saRegId);
// Setup stack size used to save preserved registers.
uint32_t saveRestoreSizes[2] {};
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
saveRestoreSizes[size_t(!archTraits.hasPushPop(group))]
for (RegGroup group : RegGroupVirtValues{})
saveRestoreSizes[size_t(!archTraits.hasInstPushPop(group))]
+= Support::alignUp(Support::popcnt(savedRegs(group)) * saveRestoreRegSize(group), saveRestoreAlignment(group));
_pushPopSaveSize = uint16_t(saveRestoreSizes[0]);
@@ -220,11 +202,10 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
_localStackOffset = v; // Store 'localStackOffset' <- Function's local stack starts here.
v += localStackSize(); // Count 'localStackSize' <- Function's local stack ends here.
// If the function's stack must be aligned, calculate the alignment necessary
// to store vector registers, and set `FuncFrame::kAttrAlignedVecSR` to inform
// PEI that it can use instructions that perform aligned stores/loads.
// If the function's stack must be aligned, calculate the alignment necessary to store vector registers, and set
// `FuncAttributes::kAlignedVecSR` to inform PEI that it can use instructions that perform aligned stores/loads.
if (stackAlignment >= vectorSize && _extraRegSaveSize) {
addAttributes(FuncFrame::kAttrAlignedVecSR);
addAttributes(FuncAttributes::kAlignedVecSR);
v = Support::alignUp(v, vectorSize); // Align 'extraRegSaveOffset'.
}
@@ -243,23 +224,19 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
// Link Register
// -------------
//
// The stack is aligned after the function call as the return address is
// stored in a link register. Some architectures may require to always
// have aligned stack after PUSH/POP operation, which is represented by
// ArchTraits::stackAlignmentConstraint().
// The stack is aligned after the function call as the return address is stored in a link register. Some
// architectures may require to always have aligned stack after PUSH/POP operation, which is represented
// by ArchTraits::stackAlignmentConstraint().
//
// No Link Register (X86/X64)
// --------------------------
//
// The return address should be stored after GP save/restore regs. It has
// the same size as `registerSize` (basically the native register/pointer
// size). We don't adjust it now as `v` now contains the exact size that the
// function requires to adjust (call frame + stack frame, vec stack size).
// The stack (if we consider this size) is misaligned now, as it's always
// aligned before the function call - when `call()` is executed it pushes
// the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes
// (depending on the architecture). So count number of bytes needed to align
// it up to the function's CallFrame (the beginning).
// The return address should be stored after GP save/restore regs. It has the same size as `registerSize`
// (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size
// that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider
// this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed
// it pushes the current EIP|RIP onto the stack, and misaligns it by 12 or 8 bytes (depending on the
// architecture). So count number of bytes needed to align it up to the function's CallFrame (the beginning).
if (v || hasFuncCalls() || !returnAddressSize)
v += Support::alignUpDiff(v + pushPopSaveSize() + returnAddressSize, stackAlignment);
@@ -285,12 +262,11 @@ ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
return kErrorOk;
}
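To make the init/finalize flow concrete, a hedged sketch of the usual call sequence (FuncSignatureT and CallConvId::kCDecl are assumed spellings from the public API; only init() and finalize() come from this file):

static Error buildFrameExample(const Environment& env) noexcept {
  FuncDetail func;
  // Signature: int fn(int, int); the calling-convention id is an assumption for illustration.
  ASMJIT_PROPAGATE(func.init(FuncSignatureT<int, int, int>(CallConvId::kCDecl), env));

  FuncFrame frame;
  ASMJIT_PROPAGATE(frame.init(func));   // copies arch, alignments, and preserved regs from `func`
  frame.setLocalStackSize(32);          // user-provided properties are set before finalization
  ASMJIT_PROPAGATE(frame.finalize());   // computes offsets, save/restore areas, and final alignment
  return kErrorOk;
}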
// ============================================================================
// [asmjit::FuncArgsAssignment]
// ============================================================================
// FuncArgsAssignment - UpdateFuncFrame
// ====================================
ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) const noexcept {
uint32_t arch = frame.arch();
Arch arch = frame.arch();
const FuncDetail* func = funcDetail();
if (!func)

File diff suppressed because it is too large.

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/funcargscontext_p.h"
@@ -31,29 +13,24 @@ ASMJIT_BEGIN_NAMESPACE
//! \{
FuncArgsContext::FuncArgsContext() noexcept {
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
_workData[group].reset();
for (RegGroup group : RegGroupVirtValues{})
_workData[size_t(group)].reset();
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept {
// The code has to be updated if this changes.
ASMJIT_ASSERT(BaseReg::kGroupVirt == 4);
uint32_t i;
uint32_t arch = frame.arch();
Arch arch = frame.arch();
const FuncDetail& func = *args.funcDetail();
_archTraits = &ArchTraits::byArch(arch);
_constraints = constraints;
_arch = uint8_t(arch);
_arch = arch;
// Initialize `_archRegs`.
for (i = 0; i < BaseReg::kGroupVirt; i++)
_workData[i]._archRegs = _constraints->availableRegs(i);
for (RegGroup group : RegGroupVirtValues{})
_workData[group]._archRegs = _constraints->availableRegs(group);
if (frame.hasPreservedFP())
_workData[BaseReg::kGroupGp]._archRegs &= ~Support::bitMask(archTraits().fpRegId());
_workData[size_t(RegGroup::kGp)]._archRegs &= ~Support::bitMask(archTraits().fpRegId());
// Extract information from all function arguments/assignments and build Var[] array.
uint32_t varId = 0;
@@ -73,7 +50,7 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
FuncValue& src = var.cur;
FuncValue& dst = var.out;
uint32_t dstGroup = 0xFFFFFFFFu;
RegGroup dstGroup = RegGroup::kMaxValue;
uint32_t dstId = BaseReg::kIdBad;
WorkData* dstWd = nullptr;
@@ -82,18 +59,17 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
return DebugUtils::errored(kErrorInvalidAssignment);
if (dst.isReg()) {
uint32_t dstType = dst.regType();
RegType dstType = dst.regType();
if (ASMJIT_UNLIKELY(!archTraits().hasRegType(dstType)))
return DebugUtils::errored(kErrorInvalidRegType);
// Copy TypeId from source if the destination doesn't have it. The RA
// used by BaseCompiler would never leave TypeId undefined, but users
// of FuncAPI can just assign phys regs without specifying the type.
// Copy TypeId from source if the destination doesn't have it. The RA used by BaseCompiler would never
// leave TypeId undefined, but users of FuncAPI can just assign phys regs without specifying the type.
if (!dst.hasTypeId())
dst.setTypeId(archTraits().regTypeToTypeId(dst.regType()));
dstGroup = archTraits().regTypeToGroup(dstType);
if (ASMJIT_UNLIKELY(dstGroup >= BaseReg::kGroupVirt))
if (ASMJIT_UNLIKELY(dstGroup > RegGroup::kMaxVirt))
return DebugUtils::errored(kErrorInvalidRegGroup);
dstWd = &_workData[dstGroup];
@@ -112,15 +88,15 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
if (!dst.hasTypeId())
dst.setTypeId(src.typeId());
RegInfo regInfo = getSuitableRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!regInfo.isValid()))
OperandSignature signature = getSuitableRegForMemToMemMove(arch, dst.typeId(), src.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid()))
return DebugUtils::errored(kErrorInvalidState);
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(regInfo.group()));
_stackDstMask = uint8_t(_stackDstMask | Support::bitMask(signature.regGroup()));
}
if (src.isReg()) {
uint32_t srcId = src.regId();
uint32_t srcGroup = archTraits().regTypeToGroup(src.regType());
RegGroup srcGroup = archTraits().regTypeToGroup(src.regType());
if (dstGroup == srcGroup) {
dstWd->assign(varId, srcId);
@@ -130,10 +106,10 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
var.markDone();
}
else {
if (ASMJIT_UNLIKELY(srcGroup >= BaseReg::kGroupVirt))
if (ASMJIT_UNLIKELY(srcGroup > RegGroup::kMaxVirt))
return DebugUtils::errored(kErrorInvalidState);
WorkData& srcData = _workData[srcGroup];
WorkData& srcData = _workData[size_t(srcGroup)];
srcData.assign(varId, srcId);
}
}
@@ -148,14 +124,15 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
}
// Initialize WorkData::workRegs.
for (i = 0; i < BaseReg::kGroupVirt; i++) {
_workData[i]._workRegs = (_workData[i].archRegs() & (frame.dirtyRegs(i) | ~frame.preservedRegs(i))) | _workData[i].dstRegs() | _workData[i].assignedRegs();
for (RegGroup group : RegGroupVirtValues{}) {
_workData[group]._workRegs =
(_workData[group].archRegs() & (frame.dirtyRegs(group) | ~frame.preservedRegs(group))) | _workData[group].dstRegs() | _workData[group].assignedRegs();
}
// Create a variable that represents `SARegId` if necessary.
bool saRegRequired = _hasStackSrc && frame.hasDynamicAlignment() && !frame.hasPreservedFP();
WorkData& gpRegs = _workData[BaseReg::kGroupGp];
WorkData& gpRegs = _workData[RegGroup::kGp];
uint32_t saCurRegId = frame.saRegId();
uint32_t saOutRegId = args.saRegId();
@@ -173,8 +150,8 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
}
if (saRegRequired) {
uint32_t ptrTypeId = Environment::is32Bit(arch) ? Type::kIdU32 : Type::kIdU64;
uint32_t ptrRegType = Environment::is32Bit(arch) ? BaseReg::kTypeGp32 : BaseReg::kTypeGp64;
TypeId ptrTypeId = Environment::is32Bit(arch) ? TypeId::kUInt32 : TypeId::kUInt64;
RegType ptrRegType = Environment::is32Bit(arch) ? RegType::kGp32 : RegType::kGp64;
_saVarId = uint8_t(varId);
_hasPreservedFP = frame.hasPreservedFP();
@@ -187,7 +164,7 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
saCurRegId = saOutRegId;
}
else {
uint32_t availableRegs = gpRegs.availableRegs();
RegMask availableRegs = gpRegs.availableRegs();
if (!availableRegs)
availableRegs = gpRegs.archRegs() & ~gpRegs.workRegs();
@@ -223,7 +200,7 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
uint32_t srcId = var.cur.regId();
uint32_t dstId = var.out.regId();
uint32_t group = archTraits().regTypeToGroup(var.cur.regType());
RegGroup group = archTraits().regTypeToGroup(var.cur.regType());
if (group != archTraits().regTypeToGroup(var.out.regType()))
continue;
@@ -242,12 +219,12 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::initWorkData(const FuncFrame& frame, co
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::markDstRegsDirty(FuncFrame& frame) noexcept {
for (uint32_t i = 0; i < BaseReg::kGroupVirt; i++) {
WorkData& wd = _workData[i];
for (RegGroup group : RegGroupVirtValues{}) {
WorkData& wd = _workData[group];
uint32_t regs = wd.usedRegs() | wd._dstShuf;
wd._workRegs |= regs;
frame.addDirtyRegs(i, regs);
frame.addDirtyRegs(group, regs);
}
return kErrorOk;
@@ -260,19 +237,19 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::markScratchRegs(FuncFrame& frame) noexc
groupMask |= _stackDstMask;
// Handle register swaps.
groupMask |= _regSwapsMask & ~Support::bitMask(BaseReg::kGroupGp);
groupMask |= _regSwapsMask & ~Support::bitMask(RegGroup::kGp);
if (!groupMask)
return kErrorOk;
// Selects one dirty register per affected group that can be used as a scratch register.
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
for (RegGroup group : RegGroupVirtValues{}) {
if (Support::bitTest(groupMask, group)) {
WorkData& wd = _workData[group];
// Initially, pick some clobbered or dirty register.
uint32_t workRegs = wd.workRegs();
uint32_t regs = workRegs & ~(wd.usedRegs() | wd._dstShuf);
RegMask workRegs = wd.workRegs();
RegMask regs = workRegs & ~(wd.usedRegs() | wd._dstShuf);
// If that didn't work out pick some register which is not in 'used'.
if (!regs)
@@ -288,7 +265,7 @@ ASMJIT_FAVOR_SIZE Error FuncArgsContext::markScratchRegs(FuncFrame& frame) noexc
if (!regs)
continue;
uint32_t regMask = Support::blsi(regs);
RegMask regMask = Support::blsi(regs);
wd._workRegs |= regMask;
frame.addDirtyRegs(group, regMask);
}

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
#define ASMJIT_CORE_FUNCARGSCONTEXT_P_H_INCLUDED
@@ -37,38 +19,30 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [TODO: Place somewhere else]
// ============================================================================
static inline RegInfo getSuitableRegForMemToMemMove(uint32_t arch, uint32_t dstTypeId, uint32_t srcTypeId) noexcept {
static inline OperandSignature getSuitableRegForMemToMemMove(Arch arch, TypeId dstTypeId, TypeId srcTypeId) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
uint32_t dstSize = Type::sizeOf(dstTypeId);
uint32_t srcSize = Type::sizeOf(srcTypeId);
uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);
uint32_t maxSize = Support::max<uint32_t>(dstSize, srcSize);
uint32_t regSize = Environment::registerSizeFromArch(arch);
uint32_t signature = 0;
if (maxSize <= regSize || (Type::isInt(dstTypeId) && Type::isInt(srcTypeId)))
signature = maxSize <= 4 ? archTraits.regTypeToSignature(BaseReg::kTypeGp32)
: archTraits.regTypeToSignature(BaseReg::kTypeGp64);
else if (maxSize <= 8 && archTraits.hasRegType(BaseReg::kTypeVec64))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec64);
else if (maxSize <= 16 && archTraits.hasRegType(BaseReg::kTypeVec128))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec128);
else if (maxSize <= 32 && archTraits.hasRegType(BaseReg::kTypeVec256))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec256);
else if (maxSize <= 64 && archTraits.hasRegType(BaseReg::kTypeVec512))
signature = archTraits.regTypeToSignature(BaseReg::kTypeVec512);
OperandSignature signature(0);
if (maxSize <= regSize || (TypeUtils::isInt(dstTypeId) && TypeUtils::isInt(srcTypeId)))
signature = maxSize <= 4 ? archTraits.regTypeToSignature(RegType::kGp32)
: archTraits.regTypeToSignature(RegType::kGp64);
else if (maxSize <= 8 && archTraits.hasRegType(RegType::kVec64))
signature = archTraits.regTypeToSignature(RegType::kVec64);
else if (maxSize <= 16 && archTraits.hasRegType(RegType::kVec128))
signature = archTraits.regTypeToSignature(RegType::kVec128);
else if (maxSize <= 32 && archTraits.hasRegType(RegType::kVec256))
signature = archTraits.regTypeToSignature(RegType::kVec256);
else if (maxSize <= 64 && archTraits.hasRegType(RegType::kVec512))
signature = archTraits.regTypeToSignature(RegType::kVec512);
return RegInfo { signature };
return signature;
}
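For illustration, how the branches above resolve on a 64-bit x86 target (type-id spellings are assumed; registerSize is 8):

// getSuitableRegForMemToMemMove(Arch::kX64, TypeId::kInt32,   TypeId::kInt32)   -> Gp32 signature (maxSize <= 4)
// getSuitableRegForMemToMemMove(Arch::kX64, TypeId::kFloat64, TypeId::kFloat64) -> Gp64 signature (maxSize <= regSize)
// getSuitableRegForMemToMemMove(Arch::kX64, TypeId::kInt32x4, TypeId::kInt32x4) -> Vec128 signature, if the target has it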
// ============================================================================
// [asmjit::FuncArgsContext]
// ============================================================================
class FuncArgsContext {
public:
enum VarId : uint32_t {
@@ -97,17 +71,17 @@ public:
struct WorkData {
//! All allocable registers provided by the architecture.
uint32_t _archRegs;
RegMask _archRegs;
//! All registers that can be used by the shuffler.
uint32_t _workRegs;
RegMask _workRegs;
//! Registers used by the shuffler (all).
uint32_t _usedRegs;
RegMask _usedRegs;
//! Assigned registers.
uint32_t _assignedRegs;
RegMask _assignedRegs;
//! Destination registers assigned to arguments or SA.
uint32_t _dstRegs;
RegMask _dstRegs;
//! Destination registers that require shuffling.
uint32_t _dstShuf;
RegMask _dstShuf;
//! Number of register swaps.
uint8_t _numSwaps;
//! Number of stack loads.
@@ -173,19 +147,20 @@ public:
_assignedRegs ^= Support::bitMask(regId);
}
inline uint32_t archRegs() const noexcept { return _archRegs; }
inline uint32_t workRegs() const noexcept { return _workRegs; }
inline uint32_t usedRegs() const noexcept { return _usedRegs; }
inline uint32_t assignedRegs() const noexcept { return _assignedRegs; }
inline uint32_t dstRegs() const noexcept { return _dstRegs; }
inline uint32_t availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
inline RegMask archRegs() const noexcept { return _archRegs; }
inline RegMask workRegs() const noexcept { return _workRegs; }
inline RegMask usedRegs() const noexcept { return _usedRegs; }
inline RegMask assignedRegs() const noexcept { return _assignedRegs; }
inline RegMask dstRegs() const noexcept { return _dstRegs; }
inline RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
};
//! Architecture traits.
const ArchTraits* _archTraits = nullptr;
//! Architecture constraints.
const RAConstraints* _constraints = nullptr;
//! Architecture identifier.
uint8_t _arch = 0;
//! Target architecture.
Arch _arch = Arch::kUnknown;
//! Has arguments passed via stack (SRC).
bool _hasStackSrc = false;
//! Has preserved frame-pointer (FP).
@@ -196,13 +171,13 @@ public:
uint8_t _regSwapsMask = 0;
uint8_t _saVarId = kVarIdNone;
uint32_t _varCount = 0;
WorkData _workData[BaseReg::kGroupVirt];
Support::Array<WorkData, Globals::kNumVirtGroups> _workData;
Var _vars[Globals::kMaxFuncArgs * Globals::kMaxValuePack + 1];
FuncArgsContext() noexcept;
inline const ArchTraits& archTraits() const noexcept { return *_archTraits; }
inline uint32_t arch() const noexcept { return _arch; }
inline Arch arch() const noexcept { return _arch; }
inline uint32_t varCount() const noexcept { return _varCount; }
inline size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); }

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/globals.h"
@@ -27,9 +9,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
// DebugUtils - Error As String
// ============================
ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#ifndef ASMJIT_NO_TEXT
@@ -54,7 +35,6 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
"LabelNameTooLong\0"
"InvalidLabelName\0"
"InvalidParentLabel\0"
"NonLocalLabelCannotHaveParent\0"
"InvalidSection\0"
"TooManySections\0"
"InvalidSectionName\0"
@@ -97,6 +77,7 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
"InvalidUseOfGpq\0"
"InvalidUseOfF80\0"
"NotConsecutiveRegs\0"
"ConsecutiveRegsAllocation\0"
"IllegalVirtReg\0"
"TooManyVirtRegs\0"
"NoMorePhysRegs\0"
@@ -109,10 +90,10 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
static const uint16_t sErrorIndex[] = {
0, 3, 15, 31, 44, 56, 71, 90, 108, 123, 132, 148, 165, 178, 192, 210, 230,
247, 264, 283, 313, 328, 344, 363, 382, 400, 422, 440, 459, 474, 490, 504,
518, 538, 563, 581, 603, 625, 642, 659, 675, 691, 707, 724, 739, 754, 774,
794, 814, 847, 867, 882, 899, 918, 939, 959, 973, 994, 1008, 1026, 1042,
1058, 1077, 1092, 1108, 1123, 1138, 1168, 1192, 1211, 1239
247, 264, 283, 298, 314, 333, 352, 370, 392, 410, 429, 444, 460, 474, 488,
508, 533, 551, 573, 595, 612, 629, 645, 661, 677, 694, 709, 724, 744, 764,
784, 817, 837, 852, 869, 888, 909, 929, 943, 964, 978, 996, 1012, 1028, 1047,
1073, 1088, 1104, 1119, 1134, 1164, 1188, 1207, 1235
};
// @EnumStringEnd@
@@ -124,6 +105,9 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#endif
}
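A short sketch of how callers typically consume errorAsString() (the surrounding JitRuntime/CodeHolder setup is illustrative only):

JitRuntime rt;
CodeHolder code;
Error err = code.init(rt.environment());
if (err != kErrorOk)
  printf("AsmJit failed: %s\n", DebugUtils::errorAsString(err));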
// DebugUtils - Debug Output
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if defined(_WIN32)
::OutputDebugStringA(str);
@@ -132,6 +116,9 @@ ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#endif
}
// DebugUtils - Fatal Errors
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
char str[1024];

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_GLOBALS_H_INCLUDED
#define ASMJIT_CORE_GLOBALS_H_INCLUDED
@@ -28,10 +10,6 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Support]
// ============================================================================
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
@@ -43,21 +21,21 @@ namespace Support {
#if defined(ASMJIT_NO_STDCXX)
namespace Support {
ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
ASMJIT_FORCE_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_FORCE_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
} // {Support}
#define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_INLINE void* operator new(size_t n) noexcept { \
return Support::operatorNew(n); \
} \
\
ASMJIT_INLINE void operator delete(void* p) noexcept { \
Support::operatorDelete(p); \
} \
\
ASMJIT_INLINE void* operator new(size_t, void* p) noexcept { return p; } \
ASMJIT_INLINE void operator delete(void*, void*) noexcept {}
#define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_FORCE_INLINE void* operator new(size_t n) noexcept { \
return Support::operatorNew(n); \
} \
\
ASMJIT_FORCE_INLINE void operator delete(void* p) noexcept { \
Support::operatorDelete(p); \
} \
\
ASMJIT_FORCE_INLINE void* operator new(size_t, void* p) noexcept { return p; } \
ASMJIT_FORCE_INLINE void operator delete(void*, void*) noexcept {}
#else
#define ASMJIT_BASE_CLASS(TYPE)
#endif
@@ -65,20 +43,32 @@ namespace Support {
//! \}
//! \endcond
// ============================================================================
// [asmjit::Globals]
// ============================================================================
//! \addtogroup asmjit_core
//! \{
//! Byte order.
enum class ByteOrder {
//! Little endian.
kLE = 0,
//! Big endian.
kBE = 1,
//! Native byte order of the target architecture.
kNative = ASMJIT_ARCH_LE ? kLE : kBE,
//! Swapped byte order of the target architecture.
kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
};
//! A policy that can be used with some `reset()` member functions.
enum class ResetPolicy : uint32_t {
//! Soft reset, doesn't deallocate memory (default).
kSoft = 0,
//! Hard reset, releases all memory used, if any.
kHard = 1
};
//! Contains typedefs, constants, and variables used globally by AsmJit.
namespace Globals {
// ============================================================================
// [asmjit::Globals::<global>]
// ============================================================================
//! Host memory allocator overhead.
static constexpr uint32_t kAllocOverhead = uint32_t(sizeof(intptr_t) * 4);
@@ -92,13 +82,11 @@ static constexpr uint32_t kGrowThreshold = 1024 * 1024 * 16;
//!
//! `2 * log2(n + 1)`
//!
//! Size of RB node is at least two pointers (without data),
//! so a theoretical architecture limit would be:
//! Size of RB node is at least two pointers (without data), so a theoretical architecture limit would be:
//!
//! `2 * log2(addressableMemorySize / sizeof(Node) + 1)`
//!
//! Which yields 30 on 32-bit arch and 61 on 64-bit arch.
//! The final value was adjusted by +1 for safety reasons.
//! Which yields 30 on 32-bit arch and 61 on 64-bit arch. The final value was adjusted by +1 for safety reasons.
static constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
//! Maximum number of operands per a single instruction.
@@ -135,34 +123,8 @@ static constexpr uint32_t kNotFound = 0xFFFFFFFFu;
//! Invalid base address.
static constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
// ============================================================================
// [asmjit::Globals::ResetPolicy]
// ============================================================================
//! Reset policy used by most `reset()` functions.
enum ResetPolicy : uint32_t {
//! Soft reset, doesn't deallocate memory (default).
kResetSoft = 0,
//! Hard reset, releases all memory used, if any.
kResetHard = 1
};
// ============================================================================
// [asmjit::Globals::Link]
// ============================================================================
enum Link : uint32_t {
kLinkLeft = 0,
kLinkRight = 1,
kLinkPrev = 0,
kLinkNext = 1,
kLinkFirst = 0,
kLinkLast = 1,
kLinkCount = 2
};
//! Number of virtual register groups.
static constexpr uint32_t kNumVirtGroups = 4;
struct Init_ {};
struct NoInit_ {};
@@ -172,24 +134,6 @@ static const constexpr NoInit_ NoInit {};
} // {Globals}
// ============================================================================
// [asmjit::ByteOrder]
// ============================================================================
//! Byte order.
namespace ByteOrder {
enum : uint32_t {
kLE = 0,
kBE = 1,
kNative = ASMJIT_ARCH_LE ? kLE : kBE,
kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
};
}
// ============================================================================
// [asmjit::ptr_as_func / func_as_ptr]
// ============================================================================
template<typename Func>
static inline Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl<Func, void*>(func); }
@@ -198,10 +142,6 @@ static inline void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_i
//! \}
// ============================================================================
// [asmjit::Error]
// ============================================================================
//! \addtogroup asmjit_error_handling
//! \{
@@ -223,9 +163,8 @@ enum ErrorCode : uint32_t {
//! Invalid state.
//!
//! If this error is returned it means that either you are doing something
//! wrong or AsmJit caught itself by doing something wrong. This error should
//! never be ignored.
//! If this error is returned it means that either you are doing something wrong or AsmJit caught itself by
//! doing something wrong. This error should never be ignored.
kErrorInvalidState,
//! Invalid or incompatible architecture.
@@ -253,9 +192,8 @@ enum ErrorCode : uint32_t {
kErrorInvalidDirective,
//! Attempt to use uninitialized label.
kErrorInvalidLabel,
//! Label index overflow - a single \ref BaseAssembler instance can hold
//! almost 2^32 (4 billion) labels. If there is an attempt to create more
//! labels then this error is returned.
//! Label index overflow - a single \ref BaseAssembler instance can hold almost 2^32 (4 billion) labels. If
//! there is an attempt to create more labels, this error is returned.
kErrorTooManyLabels,
//! Label is already bound.
kErrorLabelAlreadyBound,
@@ -265,10 +203,9 @@ enum ErrorCode : uint32_t {
kErrorLabelNameTooLong,
//! Label must always be local if it's anonymous (without a name).
kErrorInvalidLabelName,
//! Parent id passed to \ref CodeHolder::newNamedLabelEntry() was invalid.
//! Parent id passed to \ref CodeHolder::newNamedLabelEntry() was either invalid or parent is not supported
//! by the requested `LabelType`.
kErrorInvalidParentLabel,
//! Parent id specified for a non-local (global) label.
kErrorNonLocalLabelCannotHaveParent,
//! Invalid section.
kErrorInvalidSection,
@@ -356,11 +293,12 @@ enum ErrorCode : uint32_t {
kErrorInvalidUseOfGpbHi,
//! Invalid use of a 64-bit GPQ register in 32-bit mode.
kErrorInvalidUseOfGpq,
//! Invalid use of an 80-bit float (\ref Type::kIdF80).
//! Invalid use of an 80-bit float (\ref TypeId::kFloat80).
kErrorInvalidUseOfF80,
//! Some registers in the instruction muse be consecutive (some ARM and AVX512
//! neural-net instructions).
//! Instruction requires the use of consecutive registers, but the registers in its operands were not consecutive (AVX512, ASIMD load/store, etc...).
kErrorNotConsecutiveRegs,
//! Failed to allocate consecutive registers - allocable registers are either too restricted or there is a bug in RW info.
kErrorConsecutiveRegsAllocation,
//! Illegal virtual register - reported by instruction validation.
kErrorIllegalVirtReg,
@@ -388,23 +326,19 @@ enum ErrorCode : uint32_t {
kErrorCount
};
// ============================================================================
// [asmjit::DebugUtils]
// ============================================================================
//! Debugging utilities.
namespace DebugUtils {
//! \cond INTERNAL
//! Used to silence warnings about unused arguments or variables.
template<typename... Args>
static ASMJIT_INLINE void unused(Args&&...) noexcept {}
static inline void unused(Args&&...) noexcept {}
//! \endcond
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can
//! help with tracing the origin of any error reported / returned by AsmJit.
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any
//! error reported / returned by AsmJit.
static constexpr Error errored(Error err) noexcept { return err; }
//! Returns a printable version of `asmjit::Error` code.
@@ -419,12 +353,10 @@ ASMJIT_API void debugOutput(const char* str) noexcept;
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertion failures a breakpoint can be put
//! at \ref assertionFailed() function (asmjit/core/globals.cpp). A call stack
//! will be available when such assertion failure is triggered. AsmJit always
//! returns errors on failures, assertions are a last resort and usually mean
//! unrecoverable state due to out of range array access or totally invalid
//! arguments like nullptr where a valid pointer should be provided, etc...
//! If you have problems with assertion failures a breakpoint can be put at \ref assertionFailed() function
//! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit
//! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out
//! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc...
ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils}
@@ -445,9 +377,8 @@ ASMJIT_API void ASMJIT_NORETURN assertionFailed(const char* file, int line, cons
//! \def ASMJIT_PROPAGATE(...)
//!
//! Propagates a possible `Error` produced by `...` to the caller by returning
//! the error immediately. Used by AsmJit internally, but kept public for users
//! that want to use the same technique to propagate errors to the caller.
//! Propagates a possible `Error` produced by `...` to the caller by returning the error immediately. Used by AsmJit
//! internally, but kept public for users that want to use the same technique to propagate errors to the caller.
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err = __VA_ARGS__; \

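A minimal sketch of the propagation pattern described above in user code (the emitter calls are illustrative):

static Error emitProlog(x86::Assembler& a) noexcept {
  ASMJIT_PROPAGATE(a.push(x86::rbp));           // any failing emit returns its Error to the caller immediately
  ASMJIT_PROPAGATE(a.mov(x86::rbp, x86::rsp));
  return kErrorOk;
}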
View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/archtraits.h"
@@ -35,12 +17,11 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::InstAPI - Text]
// ============================================================================
// InstAPI - InstId <-> String
// ===========================
#ifndef ASMJIT_NO_TEXT
Error InstAPI::instIdToString(uint32_t arch, uint32_t instId, String& output) noexcept {
Error InstAPI::instIdToString(Arch arch, InstId instId, String& output) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::instIdToString(arch, instId, output);
@@ -54,7 +35,7 @@ Error InstAPI::instIdToString(uint32_t arch, uint32_t instId, String& output) no
return DebugUtils::errored(kErrorInvalidArch);
}
uint32_t InstAPI::stringToInstId(uint32_t arch, const char* s, size_t len) noexcept {
InstId InstAPI::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::stringToInstId(arch, s, len);
@@ -69,12 +50,11 @@ uint32_t InstAPI::stringToInstId(uint32_t arch, const char* s, size_t len) noexc
}
#endif // !ASMJIT_NO_TEXT
// ============================================================================
// [asmjit::InstAPI - Validate]
// ============================================================================
// InstAPI - Validate
// ==================
#ifndef ASMJIT_NO_VALIDATION
Error InstAPI::validate(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, uint32_t validationFlags) noexcept {
Error InstAPI::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::validate(arch, inst, operands, opCount, validationFlags);
@@ -89,12 +69,11 @@ Error InstAPI::validate(uint32_t arch, const BaseInst& inst, const Operand_* ope
}
#endif // !ASMJIT_NO_VALIDATION
// ============================================================================
// [asmjit::InstAPI - QueryRWInfo]
// ============================================================================
// InstAPI - QueryRWInfo
// =====================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryRWInfo(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
Error InstAPI::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
if (ASMJIT_UNLIKELY(opCount > Globals::kMaxOpCount))
return DebugUtils::errored(kErrorInvalidArgument);
@@ -112,12 +91,11 @@ Error InstAPI::queryRWInfo(uint32_t arch, const BaseInst& inst, const Operand_*
}
#endif // !ASMJIT_NO_INTROSPECTION
// ============================================================================
// [asmjit::InstAPI - QueryFeatures]
// ============================================================================
// InstAPI - QueryFeatures
// =======================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryFeatures(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, BaseFeatures* out) noexcept {
Error InstAPI::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch))
return x86::InstInternal::queryFeatures(arch, inst, operands, opCount, out);

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_INST_H_INCLUDED
#define ASMJIT_CORE_INST_H_INCLUDED
@@ -34,20 +16,179 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_instruction_db
//! \{
// ============================================================================
// [asmjit::BaseInst]
// ============================================================================
//! Describes an instruction.
//!
//! Each architecture has a set of valid instructions indexed from 0. Instruction with 0 id is, however, a special
//! instruction that describes an invalid instruction. Different architectures can share the same instruction id,
//! which would describe a different instruction per architecture.
//!
//! Instruction identifiers listed by architecture:
//!
//! - \ref x86::Inst (X86 and X86_64)
typedef uint32_t InstId;
//! Instruction id, options, and extraReg in a single structure. This structure
//! exists mainly to simplify analysis and validation API that requires `BaseInst`
//! and `Operand[]` array.
//! Instruction options.
//!
//! Instruction options complement instruction identifier and attributes.
enum class InstOptions : uint32_t {
//! No options.
kNone = 0,
//! Used internally by emitters for handling errors and rare cases.
kReserved = 0x00000001u,
//! Prevents following a jump during compilation (Compiler).
kUnfollow = 0x00000002u,
//! Overwrite the destination operand(s) (Compiler).
//!
//! Hint that is important for register liveness analysis. It tells the compiler that the destination operand will
//! be overwritten now or by adjacent instructions. Compiler knows when a register is completely overwritten by a
//! single instruction, so, for example, you don't have to mark "movaps" or "pxor x, x". However, if a pair of
//! instructions is used and the first of them doesn't completely overwrite the content of the destination,
//! Compiler fails to mark that register as dead.
//!
//! X86 Specific
//! ------------
//!
//! - All instructions that always overwrite at least the size of the register the virtual-register uses, for
//! example "mov", "movq", "movaps" don't need the overwrite option to be used - conversion, shuffle, and
//! other miscellaneous instructions included.
//!
//! - All instructions that clear the destination register if all operands are the same, for example "xor x, x",
//! "pcmpeqb x x", etc...
//!
//! - Consecutive instructions that partially overwrite the variable until there is no old content require
//! `BaseCompiler::overwrite()` to be used. Some examples (not always the best use cases, though):
//!
//! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
//! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
//! - `mov al, ?` followed by `and ax, 0xFF`
//! - `mov al, ?` followed by `mov ah, al`
//! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
//!
//! - If the allocated virtual register is used temporarily for scalar operations. For example, if you allocate a
//! full vector like `x86::Compiler::newXmm()` and then use that vector for scalar operations, you should use
//! the `overwrite()` directive (a usage sketch follows this enum):
//!
//! - `sqrtss x, y` - only the LO element of `x` is changed; if you don't
//! use the HI elements, use `compiler.overwrite().sqrtss(x, y)`.
kOverwrite = 0x00000004u,
//! Emit short-form of the instruction.
kShortForm = 0x00000010u,
//! Emit long-form of the instruction.
kLongForm = 0x00000020u,
//! Conditional jump is likely to be taken.
kTaken = 0x00000040u,
//! Conditional jump is unlikely to be taken.
kNotTaken = 0x00000080u,
// X86 & X64 Options
// -----------------
//! Use ModMR instead of ModRM if applicable.
kX86_ModMR = 0x00000100u,
//! Use ModRM instead of ModMR if applicable.
kX86_ModRM = 0x00000200u,
//! Use 3-byte VEX prefix if possible (AVX) (must be 0x00000400).
kX86_Vex3 = 0x00000400u,
//! Use VEX prefix when both VEX|EVEX prefixes are available (HINT: AVX_VNNI).
kX86_Vex = 0x00000800u,
//! Use 4-byte EVEX prefix if possible (AVX-512) (must be 0x00001000).
kX86_Evex = 0x00001000u,
//! LOCK prefix (lock-enabled instructions only).
kX86_Lock = 0x00002000u,
//! REP prefix (string instructions only).
kX86_Rep = 0x00004000u,
//! REPNE prefix (string instructions only).
kX86_Repne = 0x00008000u,
//! XACQUIRE prefix (only allowed instructions).
kX86_XAcquire = 0x00010000u,
//! XRELEASE prefix (only allowed instructions).
kX86_XRelease = 0x00020000u,
//! AVX-512: embedded-rounding {er} and implicit {sae}.
kX86_ER = 0x00040000u,
//! AVX-512: suppress-all-exceptions {sae}.
kX86_SAE = 0x00080000u,
//! AVX-512: round-to-nearest (even) {rn-sae} (bits 00).
kX86_RN_SAE = 0x00000000u,
//! AVX-512: round-down (toward -inf) {rd-sae} (bits 01).
kX86_RD_SAE = 0x00200000u,
//! AVX-512: round-up (toward +inf) {ru-sae} (bits 10).
kX86_RU_SAE = 0x00400000u,
//! AVX-512: round-toward-zero (truncate) {rz-sae} (bits 11).
kX86_RZ_SAE = 0x00600000u,
//! AVX-512: Use zeroing {k}{z} instead of merging {k}.
kX86_ZMask = 0x00800000u,
//! AVX-512: Mask to get embedded rounding bits (2 bits).
kX86_ERMask = kX86_RZ_SAE,
//! AVX-512: Mask of all possible AVX-512 options except EVEX prefix flag.
kX86_AVX512Mask = 0x00FC0000u,
//! Force REX.B and/or VEX.B field (X64 only).
kX86_OpCodeB = 0x01000000u,
//! Force REX.X and/or VEX.X field (X64 only).
kX86_OpCodeX = 0x02000000u,
//! Force REX.R and/or VEX.R field (X64 only).
kX86_OpCodeR = 0x04000000u,
//! Force REX.W and/or VEX.W field (X64 only).
kX86_OpCodeW = 0x08000000u,
//! Force REX prefix (X64 only).
kX86_Rex = 0x40000000u,
//! Invalid REX prefix (set by X86 or when AH|BH|CH|DH regs are used on X64).
kX86_InvalidRex = 0x80000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(InstOptions)
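A sketch of the overwrite hint in use with x86::Compiler. It assumes the usual JitRuntime/CodeHolder setup (the `rt` runtime and error handling are omitted) and that ASMJIT_NO_COMPILER is not defined; only the LO element of the XMM register is written, so the hint tells liveness analysis that the old content is dead.
CodeHolder code;
code.init(rt.environment());                 // `rt` is an existing JitRuntime.
x86::Compiler cc(&code);
FuncNode* func = cc.addFunc(FuncSignatureT<float, float>());
x86::Xmm x = cc.newXmm("x");
func->setArg(0, x);
cc.overwrite().sqrtss(x, x);                 // InstOptions::kOverwrite - HI elements of `x` are not live.
cc.ret(x);
cc.endFunc();
cc.finalize();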
//! Instruction control flow.
enum class InstControlFlow : uint32_t {
//! Regular instruction.
kRegular = 0u,
//! Unconditional jump.
kJump = 1u,
//! Conditional jump (branch).
kBranch = 2u,
//! Function call.
kCall = 3u,
//! Function return.
kReturn = 4u,
//! Maximum value of `InstControlFlow`.
kMaxValue = kReturn
};
//! Hint that is used when both input operands to the instruction are the same.
//!
//! Provides hints to the instruction RW query regarding special cases in which two or more operands are the same
//! register. This is required by instructions such as XOR, AND, OR, SUB, etc... These hints will influence the
//! RW operations query.
enum class InstSameRegHint : uint8_t {
//! No special handling.
kNone = 0,
//! Operands become read-only, the operation doesn't change the content - `X & X` and similar.
kRO = 1,
//! Operands become write-only, the content of the input(s) don't matter - `X ^ X`, `X - X`, and similar.
kWO = 2
};
//! Instruction id, options, and extraReg in a single structure. This structure exists mainly to simplify the
//! analysis and validation API that requires a `BaseInst` and an `Operand[]` array (a construction sketch
//! follows this class).
class BaseInst {
public:
//! Instruction id, see \ref BaseInst::Id or {arch-specific}::Inst::Id.
uint32_t _id;
//! Instruction options, see \ref BaseInst::Options or {arch-specific}::Inst::Options.
uint32_t _options;
//! Extra register used by instruction (either REP register or AVX-512 selector).
//! \name Members
//! \{
//! Instruction id.
InstId _id;
//! Instruction options.
InstOptions _options;
//! Extra register used by the instruction (either REP register or AVX-512 selector).
RegOnly _extraReg;
enum Id : uint32_t {
@@ -57,110 +198,39 @@ public:
kIdAbstract = 0x80000000u
};
enum Options : uint32_t {
//! Used internally by emitters for handling errors and rare cases.
kOptionReserved = 0x00000001u,
//! Prevents following a jump during compilation (BaseCompiler).
kOptionUnfollow = 0x00000002u,
//! Overwrite the destination operand(s) (BaseCompiler).
//!
//! Hint that is important for register liveness analysis. It tells the
//! compiler that the destination operand will be overwritten now or by
//! adjacent instructions. BaseCompiler knows when a register is completely
//! overwritten by a single instruction, for example you don't have to
//! mark "movaps" or "pxor x, x", however, if a pair of instructions is
//! used and the first of them doesn't completely overwrite the content
//! of the destination, BaseCompiler fails to mark that register as dead.
//!
//! X86 Specific
//! ------------
//!
//! - All instructions that always overwrite at least the size of the
//! register the virtual-register uses , for example "mov", "movq",
//! "movaps" don't need the overwrite option to be used - conversion,
//! shuffle, and other miscellaneous instructions included.
//!
//! - All instructions that clear the destination register if all operands
//! are the same, for example "xor x, x", "pcmpeqb x x", etc...
//!
//! - Consecutive instructions that partially overwrite the variable until
//! there is no old content require `BaseCompiler::overwrite()` to be used.
//! Some examples (not always the best use cases thought):
//!
//! - `movlps xmm0, ?` followed by `movhps xmm0, ?` and vice versa
//! - `movlpd xmm0, ?` followed by `movhpd xmm0, ?` and vice versa
//! - `mov al, ?` followed by `and ax, 0xFF`
//! - `mov al, ?` followed by `mov ah, al`
//! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
//!
//! - If allocated variable is used temporarily for scalar operations. For
//! example if you allocate a full vector like `x86::Compiler::newXmm()`
//! and then use that vector for scalar operations you should use
//! `overwrite()` directive:
//!
//! - `sqrtss x, y` - only LO element of `x` is changed, if you don't
//! use HI elements, use `compiler.overwrite().sqrtss(x, y)`.
kOptionOverwrite = 0x00000004u,
//! Emit short-form of the instruction.
kOptionShortForm = 0x00000010u,
//! Emit long-form of the instruction.
kOptionLongForm = 0x00000020u,
//! Conditional jump is likely to be taken.
kOptionTaken = 0x00000040u,
//! Conditional jump is unlikely to be taken.
kOptionNotTaken = 0x00000080u
};
//! Control type.
enum ControlType : uint32_t {
//! No control type (doesn't jump).
kControlNone = 0u,
//! Unconditional jump.
kControlJump = 1u,
//! Conditional jump (branch).
kControlBranch = 2u,
//! Function call.
kControlCall = 3u,
//! Function return.
kControlReturn = 4u
};
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new BaseInst instance with `id` and `options` set.
//!
//! Default values of `id` and `options` are zero, which means none instruction.
//! Such instruction is guaranteed to never exist for any architecture supported
//! by AsmJit.
inline explicit BaseInst(uint32_t id = 0, uint32_t options = 0) noexcept
: _id(id),
//! Default values of `id` and `options` are zero, which means 'none' instruction. Such instruction is guaranteed
//! to never exist for any architecture supported by AsmJit.
inline explicit BaseInst(InstId instId = 0, InstOptions options = InstOptions::kNone) noexcept
: _id(instId),
_options(options),
_extraReg() {}
inline BaseInst(uint32_t id, uint32_t options, const RegOnly& extraReg) noexcept
: _id(id),
inline BaseInst(InstId instId, InstOptions options, const RegOnly& extraReg) noexcept
: _id(instId),
_options(options),
_extraReg(extraReg) {}
inline BaseInst(uint32_t id, uint32_t options, const BaseReg& extraReg) noexcept
: _id(id),
inline BaseInst(InstId instId, InstOptions options, const BaseReg& extraReg) noexcept
: _id(instId),
_options(options),
_extraReg { extraReg.signature(), extraReg.id() } {}
//! \}
//! \name Instruction ID
//! \name Instruction Id
//! \{
//! Returns the instruction id.
inline uint32_t id() const noexcept { return _id; }
inline InstId id() const noexcept { return _id; }
//! Sets the instruction id to the given `id`.
inline void setId(uint32_t id) noexcept { _id = id; }
inline void setId(InstId id) noexcept { _id = id; }
//! Resets the instruction id to zero, see \ref kIdNone.
inline void resetId() noexcept { _id = 0; }
@@ -169,12 +239,12 @@ public:
//! \name Instruction Options
//! \{
inline uint32_t options() const noexcept { return _options; }
inline bool hasOption(uint32_t option) const noexcept { return (_options & option) != 0; }
inline void setOptions(uint32_t options) noexcept { _options = options; }
inline void addOptions(uint32_t options) noexcept { _options |= options; }
inline void clearOptions(uint32_t options) noexcept { _options &= ~options; }
inline void resetOptions() noexcept { _options = 0; }
inline InstOptions options() const noexcept { return _options; }
inline bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); }
inline void setOptions(InstOptions options) noexcept { _options = options; }
inline void addOptions(InstOptions options) noexcept { _options |= options; }
inline void clearOptions(InstOptions options) noexcept { _options &= ~options; }
inline void resetOptions() noexcept { _options = InstOptions::kNone; }
//! \}
@@ -191,20 +261,144 @@ public:
//! \}
};
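A small sketch of building a BaseInst by hand, for example to feed the validation and introspection API declared later in this header; the AVX-512 instruction and the k1 mask register are illustrative.
// vaddpd with a {k1}{z} selector, expressed as a standalone BaseInst.
BaseInst inst(x86::Inst::kIdVaddpd, InstOptions::kX86_ZMask, x86::k1);
inst.addOptions(InstOptions::kX86_Evex);                 // Force the EVEX encoding.
bool zeroing = inst.hasOption(InstOptions::kX86_ZMask);  // true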
// ============================================================================
// [asmjit::OpRWInfo]
// ============================================================================
//! CPU read/write flags used by \ref InstRWInfo.
//!
//! These flags can be used to get a basic overview about CPU specifics flags used by instructions.
enum class CpuRWFlags : uint32_t {
//! No flags.
kNone = 0x00000000u,
// Common RW Flags (0x000000FF)
// ----------------------------
//! Carry flag.
kCF = 0x00000001u,
//! Signed overflow flag.
kOF = 0x00000002u,
//! Sign flag (negative/sign, if set).
kSF = 0x00000004u,
//! Zero and/or equality flag (1 if zero/equal).
kZF = 0x00000008u,
// X86 Specific RW Flags (0xFFFFFF00)
// ----------------------------------
//! Carry flag (X86, X86_64).
kX86_CF = kCF,
//! Overflow flag (X86, X86_64).
kX86_OF = kOF,
//! Sign flag (X86, X86_64).
kX86_SF = kSF,
//! Zero flag (X86, X86_64).
kX86_ZF = kZF,
//! Adjust flag (X86, X86_64).
kX86_AF = 0x00000100u,
//! Parity flag (X86, X86_64).
kX86_PF = 0x00000200u,
//! Direction flag (X86, X86_64).
kX86_DF = 0x00000400u,
//! Interrupt enable flag (X86, X86_64).
kX86_IF = 0x00000800u,
//! Alignment check flag (X86, X86_64).
kX86_AC = 0x00001000u,
//! FPU C0 status flag (X86, X86_64).
kX86_C0 = 0x00010000u,
//! FPU C1 status flag (X86, X86_64).
kX86_C1 = 0x00020000u,
//! FPU C2 status flag (X86, X86_64).
kX86_C2 = 0x00040000u,
//! FPU C3 status flag (X86, X86_64).
kX86_C3 = 0x00080000u
};
ASMJIT_DEFINE_ENUM_FLAGS(CpuRWFlags)
//! Operand read/write flags describe how the operand is accessed and some additional features.
enum class OpRWFlags {
//! No flags.
kNone = 0,
//! Operand is read.
kRead = 0x00000001u,
//! Operand is written.
kWrite = 0x00000002u,
//! Operand is both read and written.
kRW = 0x00000003u,
//! Register operand can be replaced by a memory operand.
kRegMem = 0x00000004u,
//! The register must be allocated to the index of the previous register + 1.
//!
//! This flag is used by all architectures to describe instructions that use consecutive registers, where only the
//! first one is encoded in the instruction, and the others are just a sequence that starts with the first one. On
//! X86/X86_64 architecture this is used by instructions such as V4FMADDPS, V4FMADDSS, V4FNMADDPS, V4FNMADDSS,
//! VP4DPWSSD, VP4DPWSSDS, VP2INTERSECTD, and VP2INTERSECTQ. On ARM/AArch64 this is used by vector load and store
//! instructions that can load or store multiple registers at once.
kConsecutive = 0x00000008u,
//! The `extendByteMask()` represents a zero extension.
kZExt = 0x00000010u,
//! Register operand must use \ref OpRWInfo::physId().
kRegPhysId = 0x00000100u,
//! Base register of a memory operand must use \ref OpRWInfo::physId().
kMemPhysId = 0x00000200u,
//! This memory operand is only used to encode registers and doesn't access memory.
//!
//! X86 Specific
//! ------------
//!
//! Instructions that use such feature include BNDLDX, BNDSTX, and LEA.
kMemFake = 0x000000400u,
//! Base register of the memory operand will be read.
kMemBaseRead = 0x00001000u,
//! Base register of the memory operand will be written.
kMemBaseWrite = 0x00002000u,
//! Base register of the memory operand will be read & written.
kMemBaseRW = 0x00003000u,
//! Index register of the memory operand will be read.
kMemIndexRead = 0x00004000u,
//! Index register of the memory operand will be written.
kMemIndexWrite = 0x00008000u,
//! Index register of the memory operand will be read & written.
kMemIndexRW = 0x0000C000u,
//! Base register of the memory operand will be modified before the operation.
kMemBasePreModify = 0x00010000u,
//! Base register of the memory operand will be modified after the operation.
kMemBasePostModify = 0x00020000u
};
ASMJIT_DEFINE_ENUM_FLAGS(OpRWFlags)
// Don't remove these asserts. Read/Write flags are used extensively
// by Compiler and they must always be compatible with constants below.
static_assert(uint32_t(OpRWFlags::kRead) == 0x1, "OpRWFlags::kRead flag must be 0x1");
static_assert(uint32_t(OpRWFlags::kWrite) == 0x2, "OpRWFlags::kWrite flag must be 0x2");
static_assert(uint32_t(OpRWFlags::kRegMem) == 0x4, "OpRWFlags::kRegMem flag must be 0x4");
//! Read/Write information related to a single operand, used by \ref InstRWInfo.
struct OpRWInfo {
//! Read/Write flags, see \ref OpRWInfo::Flags.
uint32_t _opFlags;
//! \name Members
//! \{
//! Read/Write flags.
OpRWFlags _opFlags;
//! Physical register index, if required.
uint8_t _physId;
//! Size of a possible memory operand that can replace a register operand.
uint8_t _rmSize;
//! If non-zero, then this is a consecutive lead register, and the value describes how many registers follow.
uint8_t _consecutiveLeadCount;
//! Reserved for future use.
uint8_t _reserved[2];
uint8_t _reserved[1];
//! Read bit-mask where each bit represents one byte read from Reg/Mem.
uint64_t _readByteMask;
//! Write bit-mask where each bit represents one byte written to Reg/Mem.
@@ -212,61 +406,7 @@ struct OpRWInfo {
//! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _extendByteMask;
//! Flags describe how the operand is accessed and some additional information.
enum Flags : uint32_t {
//! Operand is read.
kRead = 0x00000001u,
//! Operand is written.
kWrite = 0x00000002u,
//! Operand is both read and written.
kRW = 0x00000003u,
//! Register operand can be replaced by a memory operand.
kRegMem = 0x00000004u,
//! The `extendByteMask()` represents a zero extension.
kZExt = 0x00000010u,
//! Register operand must use \ref physId().
kRegPhysId = 0x00000100u,
//! Base register of a memory operand must use \ref physId().
kMemPhysId = 0x00000200u,
//! This memory operand is only used to encode registers and doesn't access memory.
//!
//! X86 Specific
//! ------------
//!
//! Instructions that use such feature include BNDLDX, BNDSTX, and LEA.
kMemFake = 0x000000400u,
//! Base register of the memory operand will be read.
kMemBaseRead = 0x00001000u,
//! Base register of the memory operand will be written.
kMemBaseWrite = 0x00002000u,
//! Base register of the memory operand will be read & written.
kMemBaseRW = 0x00003000u,
//! Index register of the memory operand will be read.
kMemIndexRead = 0x00004000u,
//! Index register of the memory operand will be written.
kMemIndexWrite = 0x00008000u,
//! Index register of the memory operand will be read & written.
kMemIndexRW = 0x0000C000u,
//! Base register of the memory operand will be modified before the operation.
kMemBasePreModify = 0x00010000u,
//! Base register of the memory operand will be modified after the operation.
kMemBasePostModify = 0x00020000u
};
// Don't remove these asserts. Read/Write flags are used extensively
// by Compiler and they must always be compatible with constants below.
static_assert(kRead == 0x1, "OpRWInfo::kRead flag must be 0x1");
static_assert(kWrite == 0x2, "OpRWInfo::kWrite flag must be 0x2");
static_assert(kRegMem == 0x4, "OpRWInfo::kRegMem flag must be 0x4");
//! \}
//! \name Reset
//! \{
@@ -276,20 +416,21 @@ struct OpRWInfo {
//! Resets this operand info (resets all members) and sets common information
//! to the given `opFlags`, `regSize`, and possibly `physId` (a usage sketch follows this struct).
inline void reset(uint32_t opFlags, uint32_t regSize, uint32_t physId = BaseReg::kIdBad) noexcept {
inline void reset(OpRWFlags opFlags, uint32_t regSize, uint32_t physId = BaseReg::kIdBad) noexcept {
_opFlags = opFlags;
_physId = uint8_t(physId);
_rmSize = uint8_t((opFlags & kRegMem) ? regSize : uint32_t(0));
_rmSize = Support::test(opFlags, OpRWFlags::kRegMem) ? uint8_t(regSize) : uint8_t(0);
_consecutiveLeadCount = 0;
_resetReserved();
uint64_t mask = Support::lsbMask<uint64_t>(regSize);
_readByteMask = opFlags & kRead ? mask : uint64_t(0);
_writeByteMask = opFlags & kWrite ? mask : uint64_t(0);
_readByteMask = Support::test(opFlags, OpRWFlags::kRead) ? mask : uint64_t(0);
_writeByteMask = Support::test(opFlags, OpRWFlags::kWrite) ? mask : uint64_t(0);
_extendByteMask = 0;
}
inline void _resetReserved() noexcept {
memset(_reserved, 0, sizeof(_reserved));
_reserved[0] = 0;
}
//! \}
@@ -297,77 +438,77 @@ struct OpRWInfo {
//! \name Operand Flags
//! \{
//! Returns operand flags, see \ref Flags.
inline uint32_t opFlags() const noexcept { return _opFlags; }
//! Returns operand flags.
inline OpRWFlags opFlags() const noexcept { return _opFlags; }
//! Tests whether operand flags contain the given `flag`.
inline bool hasOpFlag(uint32_t flag) const noexcept { return (_opFlags & flag) != 0; }
inline bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); }
//! Adds the given `flags` to operand flags.
inline void addOpFlags(uint32_t flags) noexcept { _opFlags |= flags; }
inline void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; }
//! Removes the given `flags` from operand flags.
inline void clearOpFlags(uint32_t flags) noexcept { _opFlags &= ~flags; }
inline void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; }
//! Tests whether this operand is read from.
inline bool isRead() const noexcept { return hasOpFlag(kRead); }
inline bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); }
//! Tests whether this operand is written to.
inline bool isWrite() const noexcept { return hasOpFlag(kWrite); }
inline bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); }
//! Tests whether this operand is both read and write.
inline bool isReadWrite() const noexcept { return (_opFlags & kRW) == kRW; }
inline bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; }
//! Tests whether this operand is read only.
inline bool isReadOnly() const noexcept { return (_opFlags & kRW) == kRead; }
inline bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; }
//! Tests whether this operand is write only.
inline bool isWriteOnly() const noexcept { return (_opFlags & kRW) == kWrite; }
inline bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; }
//! Returns the number of consecutive registers that follow this operand when it's a consecutive lead register (zero otherwise).
inline uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; }
//! Tests whether this operand is Reg/Mem
//!
//! Reg/Mem operands can use either register or memory.
inline bool isRm() const noexcept { return hasOpFlag(kRegMem); }
inline bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); }
//! Tests whether the operand will be zero extended.
inline bool isZExt() const noexcept { return hasOpFlag(kZExt); }
inline bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); }
//! \}
//! \name Memory Flags
//! \{
//! Tests whether this is a fake memory operand, which is only used, because
//! of encoding. Fake memory operands do not access any memory, they are only
//! used to encode registers.
inline bool isMemFake() const noexcept { return hasOpFlag(kMemFake); }
//! Tests whether this is a fake memory operand, which is only used because of encoding. Fake memory operands do
//! not access any memory; they are only used to encode registers.
inline bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); }
//! Tests whether the instruction's memory BASE register is used.
inline bool isMemBaseUsed() const noexcept { return (_opFlags & kMemBaseRW) != 0; }
inline bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); }
//! Tests whether the instruction reads from its BASE registers.
inline bool isMemBaseRead() const noexcept { return hasOpFlag(kMemBaseRead); }
inline bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); }
//! Tests whether the instruction writes to its BASE registers.
inline bool isMemBaseWrite() const noexcept { return hasOpFlag(kMemBaseWrite); }
inline bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); }
//! Tests whether the instruction reads and writes from/to its BASE registers.
inline bool isMemBaseReadWrite() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseRW; }
inline bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; }
//! Tests whether the instruction only reads from its BASE registers.
inline bool isMemBaseReadOnly() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseRead; }
inline bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; }
//! Tests whether the instruction only writes to its BASE registers.
inline bool isMemBaseWriteOnly() const noexcept { return (_opFlags & kMemBaseRW) == kMemBaseWrite; }
inline bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; }
//! Tests whether the instruction modifies the BASE register before it uses
//! it to calculate the target address.
inline bool isMemBasePreModify() const noexcept { return hasOpFlag(kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses
//! it to calculate the target address.
inline bool isMemBasePostModify() const noexcept { return hasOpFlag(kMemBasePostModify); }
//! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address.
inline bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses it to calculate the target address.
inline bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); }
//! Tests whether the instruction's memory INDEX register is used.
inline bool isMemIndexUsed() const noexcept { return (_opFlags & kMemIndexRW) != 0; }
inline bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); }
//! Tests whether the instruction reads the INDEX registers.
inline bool isMemIndexRead() const noexcept { return hasOpFlag(kMemIndexRead); }
inline bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); }
//! Tests whether the instruction writes to its INDEX registers.
inline bool isMemIndexWrite() const noexcept { return hasOpFlag(kMemIndexWrite); }
inline bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); }
//! Tests whether the instruction reads and writes from/to its INDEX registers.
inline bool isMemIndexReadWrite() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexRW; }
inline bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; }
//! Tests whether the instruction only reads from its INDEX registers.
inline bool isMemIndexReadOnly() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexRead; }
inline bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; }
//! Tests whether the instruction only writes to its INDEX registers.
inline bool isMemIndexWriteOnly() const noexcept { return (_opFlags & kMemIndexRW) == kMemIndexWrite; }
inline bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; }
//! \}
@@ -415,18 +556,17 @@ struct OpRWInfo {
//! \}
};
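A quick sketch of what OpRWInfo::reset() documented above produces for a 4-byte read/write operand that can also be a memory operand; the expected values follow directly from the implementation.
OpRWInfo op;
op.reset(OpRWFlags::kRW | OpRWFlags::kRegMem, 4);
// op.rmSize()        == 4
// op.readByteMask()  == 0x0F  (lowest 4 bytes are read)
// op.writeByteMask() == 0x0F  (lowest 4 bytes are written)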
// ============================================================================
// [asmjit::InstRWInfo]
// ============================================================================
//! Read/Write information of an instruction.
struct InstRWInfo {
//! \name Members
//! \{
//! Instruction flags (there are no flags at the moment, this field is reserved).
uint32_t _instFlags;
//! Mask of CPU flags read.
uint32_t _readFlags;
//! Mask of CPU flags written.
uint32_t _writeFlags;
//! CPU flags read.
CpuRWFlags _readFlags;
//! CPU flags written.
CpuRWFlags _writeFlags;
//! Count of operands.
uint8_t _opCount;
//! CPU feature required for replacing register operand with memory operand.
@@ -438,6 +578,8 @@ struct InstRWInfo {
//! Read/Write info of instruction operands.
OpRWInfo _operands[Globals::kMaxOpCount];
//! \}
//! \name Commons
//! \{
@@ -446,40 +588,29 @@ struct InstRWInfo {
//! \}
//! \name Instruction Flags
//!
//! \name CPU Flags Information
//! \{
inline uint32_t instFlags() const noexcept { return _instFlags; }
inline bool hasInstFlag(uint32_t flag) const noexcept { return (_instFlags & flag) != 0; }
//! \}
//! \name CPU Flags Read/Write Information
//! \{
//! Returns read flags of the instruction.
inline uint32_t readFlags() const noexcept { return _readFlags; }
//! Returns write flags of the instruction.
inline uint32_t writeFlags() const noexcept { return _writeFlags; }
//! Returns a mask of CPU flags read.
inline CpuRWFlags readFlags() const noexcept { return _readFlags; }
//! Returns a mask of CPU flags written.
inline CpuRWFlags writeFlags() const noexcept { return _writeFlags; }
//! \}
//! \name Reg/Mem Information
//! \{
//! Returns the CPU feature required to replace a register operand with memory
//! operand. If the returned feature is zero (none) then this instruction
//! either doesn't provide memory operand combination or there is no extra
//! CPU feature required.
//! Returns the CPU feature required to replace a register operand with a memory operand. If the returned feature is
//! zero (none), then this instruction either doesn't provide a memory operand combination or there is no extra CPU
//! feature required.
//!
//! X86 Specific
//! ------------
//!
//! Some AVX+ instructions may require extra features for replacing registers
//! with memory operands, for example VPSLLDQ instruction only supports
//! 'reg/reg/imm' combination on AVX/AVX2 capable CPUs and requires AVX-512 for
//! 'reg/mem/imm' combination.
//! Some AVX+ instructions may require extra features for replacing registers with memory operands, for example the
//! VPSLLDQ instruction only supports the `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires
//! AVX-512 for the `vpslldq reg, mem, imm` combination.
inline uint32_t rmFeature() const noexcept { return _rmFeature; }
//! \}
@@ -505,49 +636,43 @@ struct InstRWInfo {
//! \}
};
// ============================================================================
// [asmjit::InstAPI]
// ============================================================================
//! Validation flags that can be used with \ref InstAPI::validate().
enum class ValidationFlags : uint32_t {
//! No flags.
kNone = 0,
//! Allow virtual registers in the instruction.
kEnableVirtRegs = 0x01u
};
ASMJIT_DEFINE_ENUM_FLAGS(ValidationFlags)
//! Instruction API.
namespace InstAPI {
//! Validation flags that can be used with \ref InstAPI::validate().
enum ValidationFlags : uint32_t {
//! Allow virtual registers in the instruction.
kValidationFlagVirtRegs = 0x01u
};
#ifndef ASMJIT_NO_TEXT
//! Appends the name of the instruction specified by `instId` and `instOptions`
//! into the `output` string.
//! Appends the name of the instruction specified by `instId` and `instOptions` into the `output` string.
//!
//! \note Instruction options would only affect instruction prefix & suffix,
//! other options would be ignored. If `instOptions` is zero then only raw
//! instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(uint32_t arch, uint32_t instId, String& output) noexcept;
//! \note Instruction options would only affect the instruction prefix & suffix; other options would be ignored.
//! If `instOptions` is zero, then only the raw instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(Arch arch, InstId instId, String& output) noexcept;
//! Parses an instruction name in the given string `s`. Length is specified
//! by `len` argument, which can be `SIZE_MAX` if `s` is known to be null
//! terminated.
//! Parses an instruction name in the given string `s`. Length is specified by `len` argument, which can be
//! `SIZE_MAX` if `s` is known to be null terminated.
//!
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such
//! instruction exists.
ASMJIT_API uint32_t stringToInstId(uint32_t arch, const char* s, size_t len) noexcept;
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists.
ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
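A round-trip sketch; the instruction id is illustrative and requires the X86 backend to be compiled in.
String name;
InstAPI::instIdToString(Arch::kX64, x86::Inst::kIdVaddpd, name);      // name == "vaddpd"
InstId id = InstAPI::stringToInstId(Arch::kX64, "vaddpd", SIZE_MAX);  // Back to the id.
bool ok = (id == x86::Inst::kIdVaddpd);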
#ifndef ASMJIT_NO_VALIDATION
//! Validates the given instruction considering the validation `flags`, see
//! \ref ValidationFlags.
ASMJIT_API Error validate(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, uint32_t validationFlags = 0) noexcept;
//! Validates the given instruction considering the given `validationFlags`.
ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept;
#endif // !ASMJIT_NO_VALIDATION
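A sketch that validates an explicit operand list without any emitter; the architecture and operands are illustrative.
BaseInst inst(x86::Inst::kIdAdd);
Operand operands[] = { x86::eax, x86::dword_ptr(x86::rcx) };
Error err = InstAPI::validate(Arch::kX64, inst, operands, 2);
// err == kErrorOk for `add eax, dword ptr [rcx]`.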
#ifndef ASMJIT_NO_INTROSPECTION
//! Gets Read/Write information of the given instruction.
ASMJIT_API Error queryRWInfo(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
ASMJIT_API Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
//! Gets CPU features required by the given instruction.
ASMJIT_API Error queryFeatures(uint32_t arch, const BaseInst& inst, const Operand_* operands, size_t opCount, BaseFeatures* out) noexcept;
ASMJIT_API Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstAPI}
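A sketch that queries both the required CPU features and the RW information of a single instruction; the CpuFeatures accessor naming is the usual asmjit one and is assumed here.
BaseInst inst(x86::Inst::kIdVpslldq);
Operand operands[] = { x86::xmm0, x86::xmm1, Imm(4) };
CpuFeatures features;
if (InstAPI::queryFeatures(Arch::kX64, inst, operands, 3, &features) == kErrorOk) {
  bool needsAvx = features.x86().hasAVX();   // Assumed accessor naming.
}
InstRWInfo rw;
if (InstAPI::queryRWInfo(Arch::kX64, inst, operands, 3, &rw) == kErrorOk) {
  bool dstWritten = rw.operand(0).isWriteOnly();
  uint32_t rmFeature = rw.rmFeature();       // Non-zero if the reg->mem form needs an extra CPU feature.
}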

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_JIT
@@ -35,24 +17,24 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::JitAllocator - Constants]
// ============================================================================
// JitAllocator - Constants
// ========================
enum JitAllocatorConstants : uint32_t {
//! Number of pools to use when `JitAllocator::kOptionUseMultiplePools` is set.
//!
//! Each pool increases granularity twice to make memory management more
//! efficient. Ideal number of pools appears to be 3 to 4 as it distributes
//! small and large functions properly.
kJitAllocatorMultiPoolCount = 3,
//! Number of pools to use when `JitAllocatorOptions::kUseMultiplePools` is set.
//!
//! Each pool increases granularity twice to make memory management more
//! efficient. Ideal number of pools appears to be 3 to 4 as it distributes
//! small and large functions properly.
static constexpr uint32_t kJitAllocatorMultiPoolCount = 3;
//! Minimum granularity (and the default granularity for pool #0).
kJitAllocatorBaseGranularity = 64,
//! Minimum granularity (and the default granularity for pool #0).
static constexpr uint32_t kJitAllocatorBaseGranularity = 64;
//! Maximum block size (32MB).
kJitAllocatorMaxBlockSize = 1024 * 1024 * 32
};
//! Maximum block size (32MB).
static constexpr uint32_t kJitAllocatorMaxBlockSize = 1024 * 1024 * 32;
// JitAllocator - Fill Pattern
// ===========================
static inline uint32_t JitAllocator_defaultFillPattern() noexcept {
// X86 and X86_64 - 4x 'int3' instruction.
@@ -63,9 +45,8 @@ static inline uint32_t JitAllocator_defaultFillPattern() noexcept {
return 0u;
}
// ============================================================================
// [asmjit::BitVectorRangeIterator]
// ============================================================================
// JitAllocator - BitVectorRangeIterator
// =====================================
template<typename T, uint32_t B>
class BitVectorRangeIterator {
@@ -78,19 +59,19 @@ public:
enum : uint32_t { kBitWordSize = Support::bitSizeOf<T>() };
enum : T { kXorMask = B == 0 ? Support::allOnes<T>() : T(0) };
ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept {
ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords) noexcept {
init(data, numBitWords);
}
ASMJIT_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
ASMJIT_FORCE_INLINE BitVectorRangeIterator(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
init(data, numBitWords, start, end);
}
ASMJIT_INLINE void init(const T* data, size_t numBitWords) noexcept {
ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords) noexcept {
init(data, numBitWords, 0, numBitWords * kBitWordSize);
}
ASMJIT_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
ASMJIT_FORCE_INLINE void init(const T* data, size_t numBitWords, size_t start, size_t end) noexcept {
ASMJIT_ASSERT(numBitWords >= (end + kBitWordSize - 1) / kBitWordSize);
DebugUtils::unused(numBitWords);
@@ -107,7 +88,7 @@ public:
_bitWord = bitWord;
}
ASMJIT_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits<size_t>::max()) noexcept {
ASMJIT_FORCE_INLINE bool nextRange(size_t* rangeStart, size_t* rangeEnd, size_t rangeHint = std::numeric_limits<size_t>::max()) noexcept {
// Skip all empty BitWords.
while (_bitWord == 0) {
_idx += kBitWordSize;
@@ -153,9 +134,8 @@ public:
}
};
// ============================================================================
// [asmjit::JitAllocator - Pool]
// ============================================================================
// JitAllocator - Pool
// ===================
class JitAllocatorBlock;
@@ -163,6 +143,27 @@ class JitAllocatorPool {
public:
ASMJIT_NONCOPYABLE(JitAllocatorPool)
//! Double linked list of blocks.
ZoneList<JitAllocatorBlock> blocks;
//! Where to start looking first.
JitAllocatorBlock* cursor;
//! Count of blocks.
uint32_t blockCount;
//! Allocation granularity.
uint16_t granularity;
//! Log2(granularity).
uint8_t granularityLog2;
//! Count of empty blocks (either 0 or 1 as we won't keep more blocks empty).
uint8_t emptyBlockCount;
//! Number of bits reserved across all blocks.
size_t totalAreaSize;
//! Number of bits used across all blocks.
size_t totalAreaUsed;
//! Overhead of all blocks (in bytes).
size_t totalOverheadBytes;
inline JitAllocatorPool(uint32_t granularity) noexcept
: blocks(),
cursor(nullptr),
@@ -190,32 +191,10 @@ public:
using namespace Support;
return alignUp<size_t>(areaSize, kBitWordSizeInBits) / kBitWordSizeInBits;
}
//! Double linked list of blocks.
ZoneList<JitAllocatorBlock> blocks;
//! Where to start looking first.
JitAllocatorBlock* cursor;
//! Count of blocks.
uint32_t blockCount;
//! Allocation granularity.
uint16_t granularity;
//! Log2(granularity).
uint8_t granularityLog2;
//! Count of empty blocks (either 0 or 1 as we won't keep more blocks empty).
uint8_t emptyBlockCount;
//! Number of bits reserved across all blocks.
size_t totalAreaSize;
//! Number of bits used across all blocks.
size_t totalAreaUsed;
//! Overhead of all blocks (in bytes).
size_t totalOverheadBytes;
};
// ============================================================================
// [asmjit::JitAllocator - Block]
// ============================================================================
// JitAllocator - Block
// ====================
class JitAllocatorBlock : public ZoneTreeNodeT<JitAllocatorBlock>,
public ZoneListNode<JitAllocatorBlock> {
@@ -280,7 +259,7 @@ public:
inline JitAllocatorPool* pool() const noexcept { return _pool; }
inline uint8_t* roPtr() const noexcept { return static_cast<uint8_t*>(_mapping.ro); }
inline uint8_t* rxPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rx); }
inline uint8_t* rwPtr() const noexcept { return static_cast<uint8_t*>(_mapping.rw); }
inline bool hasFlag(uint32_t f) const noexcept { return (_flags & f) != 0; }
@@ -376,30 +355,25 @@ public:
}
// RBTree default CMP uses '<' and '>' operators.
inline bool operator<(const JitAllocatorBlock& other) const noexcept { return roPtr() < other.roPtr(); }
inline bool operator>(const JitAllocatorBlock& other) const noexcept { return roPtr() > other.roPtr(); }
inline bool operator<(const JitAllocatorBlock& other) const noexcept { return rxPtr() < other.rxPtr(); }
inline bool operator>(const JitAllocatorBlock& other) const noexcept { return rxPtr() > other.rxPtr(); }
// Special implementation for querying blocks by `key`, which must be in `[BlockPtr, BlockPtr + BlockSize)` range.
inline bool operator<(const uint8_t* key) const noexcept { return roPtr() + _blockSize <= key; }
inline bool operator>(const uint8_t* key) const noexcept { return roPtr() > key; }
inline bool operator<(const uint8_t* key) const noexcept { return rxPtr() + _blockSize <= key; }
inline bool operator>(const uint8_t* key) const noexcept { return rxPtr() > key; }
};
// ============================================================================
// [asmjit::JitAllocator - PrivateImpl]
// ============================================================================
// JitAllocator - PrivateImpl
// ==========================
class JitAllocatorPrivateImpl : public JitAllocator::Impl {
public:
inline JitAllocatorPrivateImpl(JitAllocatorPool* pools, size_t poolCount) noexcept
: JitAllocator::Impl {},
pools(pools),
poolCount(poolCount) {}
inline ~JitAllocatorPrivateImpl() noexcept {}
//! Lock for thread safety.
mutable Lock lock;
//! System page size (also a minimum block size).
uint32_t pageSize;
//! Number of active allocations.
size_t allocationCount;
//! Blocks from all pools in RBTree.
ZoneTree<JitAllocatorBlock> tree;
@@ -407,14 +381,21 @@ public:
JitAllocatorPool* pools;
//! Number of allocator pools.
size_t poolCount;
inline JitAllocatorPrivateImpl(JitAllocatorPool* pools, size_t poolCount) noexcept
: JitAllocator::Impl {},
pageSize(0),
allocationCount(0),
pools(pools),
poolCount(poolCount) {}
inline ~JitAllocatorPrivateImpl() noexcept {}
};
static const JitAllocator::Impl JitAllocatorImpl_none {};
static const JitAllocator::CreateParams JitAllocatorParams_none {};
// ============================================================================
// [asmjit::JitAllocator - Utilities]
// ============================================================================
// JitAllocator - Utilities
// ========================
static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::CreateParams* params) noexcept {
VirtMem::Info vmInfo = VirtMem::info();
@@ -422,14 +403,14 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::
if (!params)
params = &JitAllocatorParams_none;
uint32_t options = params->options;
JitAllocatorOptions options = params->options;
uint32_t blockSize = params->blockSize;
uint32_t granularity = params->granularity;
uint32_t fillPattern = params->fillPattern;
// Setup pool count to [1..3].
size_t poolCount = 1;
if (options & JitAllocator::kOptionUseMultiplePools)
if (Support::test(options, JitAllocatorOptions::kUseMultiplePools))
poolCount = kJitAllocatorMultiPoolCount;
// Setup block size [64kB..256MB].
@@ -441,7 +422,7 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::
granularity = kJitAllocatorBaseGranularity;
// Setup fill-pattern.
if (!(options & JitAllocator::kOptionCustomFillPattern))
if (uint32_t(options & JitAllocatorOptions::kCustomFillPattern) == 0)
fillPattern = JitAllocator_defaultFillPattern();
size_t size = sizeof(JitAllocatorPrivateImpl) + sizeof(JitAllocatorPool) * poolCount;
@@ -533,26 +514,32 @@ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* imp
uint32_t blockFlags = 0;
if (bitWords != nullptr) {
if (impl->options & JitAllocator::kOptionUseDualMapping) {
err = VirtMem::allocDualMapping(&virtMem, blockSize, VirtMem::kAccessRWX);
if (Support::test(impl->options, JitAllocatorOptions::kUseDualMapping)) {
err = VirtMem::allocDualMapping(&virtMem, blockSize, VirtMem::MemoryFlags::kAccessRWX);
blockFlags |= JitAllocatorBlock::kFlagDualMapped;
}
else {
err = VirtMem::alloc(&virtMem.ro, blockSize, VirtMem::kAccessRWX);
virtMem.rw = virtMem.ro;
err = VirtMem::alloc(&virtMem.rx, blockSize, VirtMem::MemoryFlags::kAccessRWX);
virtMem.rw = virtMem.rx;
}
}
// Out of memory.
if (ASMJIT_UNLIKELY(!block || !bitWords || err != kErrorOk)) {
if (bitWords) ::free(bitWords);
if (block) ::free(block);
if (bitWords)
::free(bitWords);
if (block)
::free(block);
return nullptr;
}
// Fill the memory if the secure mode is enabled.
if (impl->options & JitAllocator::kOptionFillUnusedMemory)
if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
VirtMem::ProtectJitReadWriteScope scope(virtMem.rw, blockSize);
JitAllocatorImpl_fillPattern(virtMem.rw, impl->fillPattern, blockSize);
}
memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord));
return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
@@ -564,7 +551,7 @@ static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAlloc
if (block->hasFlag(JitAllocatorBlock::kFlagDualMapped))
VirtMem::releaseDualMapping(&block->_mapping, block->blockSize());
else
VirtMem::release(block->roPtr(), block->blockSize());
VirtMem::release(block->rxPtr(), block->blockSize());
::free(block->_usedBitVector);
::free(block);
@@ -603,26 +590,31 @@ static void JitAllocatorImpl_removeBlock(JitAllocatorPrivateImpl* impl, JitAlloc
}
static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
JitAllocatorPool* pool = block->pool();
if (block->hasFlag(JitAllocatorBlock::kFlagEmpty))
return;
JitAllocatorPool* pool = block->pool();
uint32_t areaSize = block->areaSize();
uint32_t granularity = pool->granularity;
size_t numBitWords = pool->bitWordCountFromAreaSize(areaSize);
if (impl->options & JitAllocator::kOptionFillUnusedMemory) {
VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadWrite);
if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
uint8_t* rwPtr = block->rwPtr();
for (size_t i = 0; i < numBitWords; i++) {
Support::BitWordIterator<Support::BitWord> it(block->_usedBitVector[i]);
while (it.hasNext()) {
size_t index = it.next();
JitAllocatorImpl_fillPattern(rwPtr + index * granularity , impl->fillPattern, granularity);
}
rwPtr += Support::bitSizeOf<Support::BitWord>() * granularity;
BitVectorRangeIterator<Support::BitWord, 0> it(block->_usedBitVector, pool->bitWordCountFromAreaSize(block->areaSize()));
size_t rangeStart;
size_t rangeEnd;
while (it.nextRange(&rangeStart, &rangeEnd)) {
uint8_t* spanPtr = rwPtr + rangeStart * granularity;
size_t spanSize = (rangeEnd - rangeStart) * granularity;
JitAllocatorImpl_fillPattern(spanPtr, impl->fillPattern, spanSize);
VirtMem::flushInstructionCache(spanPtr, spanSize);
}
}
VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadExecute);
memset(block->_usedBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));
memset(block->_stopBitVector, 0, size_t(numBitWords) * sizeof(Support::BitWord));
@@ -635,9 +627,8 @@ static void JitAllocatorImpl_wipeOutBlock(JitAllocatorPrivateImpl* impl, JitAllo
block->clearFlags(JitAllocatorBlock::kFlagDirty);
}
// ============================================================================
// [asmjit::JitAllocator - Construction / Destruction]
// ============================================================================
// JitAllocator - Construction & Destruction
// =========================================
JitAllocator::JitAllocator(const CreateParams* params) noexcept {
_impl = JitAllocatorImpl_new(params);
@@ -649,15 +640,14 @@ JitAllocator::~JitAllocator() noexcept {
if (_impl == &JitAllocatorImpl_none)
return;
reset(Globals::kResetHard);
reset(ResetPolicy::kHard);
JitAllocatorImpl_destroy(static_cast<JitAllocatorPrivateImpl*>(_impl));
}
// ============================================================================
// [asmjit::JitAllocator - Reset]
// ============================================================================
// JitAllocator - Reset
// ====================
void JitAllocator::reset(uint32_t resetPolicy) noexcept {
void JitAllocator::reset(ResetPolicy resetPolicy) noexcept {
if (_impl == &JitAllocatorImpl_none)
return;
@@ -670,7 +660,7 @@ void JitAllocator::reset(uint32_t resetPolicy) noexcept {
JitAllocatorBlock* block = pool.blocks.first();
JitAllocatorBlock* blockToKeep = nullptr;
if (resetPolicy != Globals::kResetHard && !(impl->options & kOptionImmediateRelease)) {
if (resetPolicy != ResetPolicy::kHard && uint32_t(impl->options & JitAllocatorOptions::kImmediateRelease) == 0) {
blockToKeep = block;
block = block->next();
}
@@ -693,9 +683,8 @@ void JitAllocator::reset(uint32_t resetPolicy) noexcept {
}
}
// ============================================================================
// [asmjit::JitAllocator - Statistics]
// ============================================================================
// JitAllocator - Statistics
// =========================
JitAllocator::Statistics JitAllocator::statistics() const noexcept {
Statistics statistics;
@@ -713,23 +702,24 @@ JitAllocator::Statistics JitAllocator::statistics() const noexcept {
statistics._usedSize += size_t(pool.totalAreaUsed) * pool.granularity;
statistics._overheadSize += size_t(pool.totalOverheadBytes);
}
statistics._allocationCount = impl->allocationCount;
}
return statistics;
}
// ============================================================================
// [asmjit::JitAllocator - Alloc / Release]
// ============================================================================
// JitAllocator - Alloc & Release
// ==============================
Error JitAllocator::alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept {
Error JitAllocator::alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcept {
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
return DebugUtils::errored(kErrorNotInitialized);
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
constexpr uint32_t kNoIndex = std::numeric_limits<uint32_t>::max();
*roPtrOut = nullptr;
*rxPtrOut = nullptr;
*rwPtrOut = nullptr;
// Align to the minimum granularity by default.
@@ -815,49 +805,56 @@ Error JitAllocator::alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcep
}
// Update statistics.
impl->allocationCount++;
block->markAllocatedArea(areaIndex, areaIndex + areaSize);
// Return a pointer to the allocated memory.
size_t offset = pool->byteSizeFromAreaSize(areaIndex);
ASMJIT_ASSERT(offset <= block->blockSize() - size);
*roPtrOut = block->roPtr() + offset;
*rxPtrOut = block->rxPtr() + offset;
*rwPtrOut = block->rwPtr() + offset;
return kErrorOk;
}
Error JitAllocator::release(void* roPtr) noexcept {
Error JitAllocator::release(void* rxPtr) noexcept {
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(!roPtr))
if (ASMJIT_UNLIKELY(!rxPtr))
return DebugUtils::errored(kErrorInvalidArgument);
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
LockGuard guard(impl->lock);
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(roPtr));
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rxPtr));
if (ASMJIT_UNLIKELY(!block))
return DebugUtils::errored(kErrorInvalidState);
// Offset relative to the start of the block.
JitAllocatorPool* pool = block->pool();
size_t offset = (size_t)((uint8_t*)roPtr - block->roPtr());
size_t offset = (size_t)((uint8_t*)rxPtr - block->rxPtr());
// The first bit representing the allocated area and its size.
uint32_t areaIndex = uint32_t(offset >> pool->granularityLog2);
uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaIndex, true)) + 1;
uint32_t areaSize = areaEnd - areaIndex;
impl->allocationCount--;
block->markReleasedArea(areaIndex, areaEnd);
// Fill the released memory if the secure mode is enabled.
if (impl->options & kOptionFillUnusedMemory)
JitAllocatorImpl_fillPattern(block->rwPtr() + areaIndex * pool->granularity, impl->fillPattern, areaSize * pool->granularity);
if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
uint8_t* spanPtr = block->rwPtr() + areaIndex * pool->granularity;
size_t spanSize = areaSize * pool->granularity;
VirtMem::ProtectJitReadWriteScope scope(spanPtr, spanSize);
JitAllocatorImpl_fillPattern(spanPtr, impl->fillPattern, spanSize);
}
// Release the whole block if it became empty.
if (block->areaUsed() == 0) {
if (pool->emptyBlockCount || (impl->options & kOptionImmediateRelease)) {
if (pool->emptyBlockCount || Support::test(impl->options, JitAllocatorOptions::kImmediateRelease)) {
JitAllocatorImpl_removeBlock(impl, block);
JitAllocatorImpl_deleteBlock(impl, block);
}
@@ -869,26 +866,26 @@ Error JitAllocator::release(void* roPtr) noexcept {
return kErrorOk;
}
Error JitAllocator::shrink(void* roPtr, size_t newSize) noexcept {
Error JitAllocator::shrink(void* rxPtr, size_t newSize) noexcept {
if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
return DebugUtils::errored(kErrorNotInitialized);
if (ASMJIT_UNLIKELY(!roPtr))
if (ASMJIT_UNLIKELY(!rxPtr))
return DebugUtils::errored(kErrorInvalidArgument);
if (ASMJIT_UNLIKELY(newSize == 0))
return release(roPtr);
return release(rxPtr);
JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
LockGuard guard(impl->lock);
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(roPtr));
JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rxPtr));
if (ASMJIT_UNLIKELY(!block))
return DebugUtils::errored(kErrorInvalidArgument);
// Offset relative to the start of the block.
JitAllocatorPool* pool = block->pool();
size_t offset = (size_t)((uint8_t*)roPtr - block->roPtr());
size_t offset = (size_t)((uint8_t*)rxPtr - block->rxPtr());
// The first bit representing the allocated area and its size.
uint32_t areaStart = uint32_t(offset >> pool->granularityLog2);
@@ -905,16 +902,15 @@ Error JitAllocator::shrink(void* roPtr, size_t newSize) noexcept {
block->markShrunkArea(areaStart + areaShrunkSize, areaEnd);
// Fill released memory if the secure mode is enabled.
if (impl->options & kOptionFillUnusedMemory)
if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory))
JitAllocatorImpl_fillPattern(block->rwPtr() + (areaStart + areaShrunkSize) * pool->granularity, fillPattern(), areaDiff * pool->granularity);
}
return kErrorOk;
}
// ============================================================================
// [asmjit::JitAllocator - Unit]
// ============================================================================
// JitAllocator - Tests
// ====================
#if defined(ASMJIT_TEST)
// A pseudo random number generator based on a paper by Sebastiano Vigna:
@@ -1034,14 +1030,14 @@ public:
}
void* alloc(size_t size) noexcept {
void* roPtr;
void* rxPtr;
void* rwPtr;
Error err = _allocator.alloc(&roPtr, &rwPtr, size);
Error err = _allocator.alloc(&rxPtr, &rwPtr, size);
EXPECT(err == kErrorOk, "JitAllocator failed to allocate %zu bytes\n", size);
_insert(roPtr, size);
return roPtr;
_insert(rxPtr, size);
return rxPtr;
}
void release(void* p) noexcept {
@@ -1099,7 +1095,7 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep
}
for (size_t j = 0; j < kPatternSize; j++) {
EXPECT(in[j] == out[j], "Invalid pattern detected at [%zu] (%llX != %llX", j, (unsigned long long)in[j], (unsigned long long)out[j]);
EXPECT(in[j] == out[j], "Invalid pattern detected at [%zu] (%llX != %llX)", j, (unsigned long long)in[j], (unsigned long long)out[j]);
}
}
}
@@ -1109,20 +1105,20 @@ UNIT(jit_allocator) {
struct TestParams {
const char* name;
uint32_t options;
JitAllocatorOptions options;
uint32_t blockSize;
uint32_t granularity;
};
static TestParams testParams[] = {
{ "Default", 0, 0, 0 },
{ "16MB blocks", 0, 16 * 1024 * 1024, 0 },
{ "256B granularity", 0, 0, 256 },
{ "kOptionUseDualMapping", JitAllocator::kOptionUseDualMapping, 0, 0 },
{ "kOptionUseMultiplePools", JitAllocator::kOptionUseMultiplePools, 0, 0 },
{ "kOptionFillUnusedMemory", JitAllocator::kOptionFillUnusedMemory, 0, 0 },
{ "kOptionImmediateRelease", JitAllocator::kOptionImmediateRelease, 0, 0 },
{ "kOptionUseDualMapping | kOptionFillUnusedMemory", JitAllocator::kOptionUseDualMapping | JitAllocator::kOptionFillUnusedMemory, 0, 0 }
{ "Default", JitAllocatorOptions::kNone, 0, 0 },
{ "16MB blocks", JitAllocatorOptions::kNone, 16 * 1024 * 1024, 0 },
{ "256B granularity", JitAllocatorOptions::kNone, 0, 256 },
{ "kUseDualMapping", JitAllocatorOptions::kUseDualMapping, 0, 0 },
{ "kUseMultiplePools", JitAllocatorOptions::kUseMultiplePools, 0, 0 },
{ "kFillUnusedMemory", JitAllocatorOptions::kFillUnusedMemory, 0, 0 },
{ "kImmediateRelease", JitAllocatorOptions::kImmediateRelease, 0, 0 },
{ "kUseDualMapping | kFillUnusedMemory", JitAllocatorOptions::kUseDualMapping | JitAllocatorOptions::kFillUnusedMemory, 0, 0 }
};
INFO("BitVectorRangeIterator<uint32_t>");

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
#define ASMJIT_CORE_JITALLOCATOR_H_INCLUDED
@@ -35,37 +17,67 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_virtual_memory
//! \{
// ============================================================================
// [asmjit::JitAllocator]
// ============================================================================
//! Options used by \ref JitAllocator.
enum class JitAllocatorOptions : uint32_t {
//! No options.
kNone = 0,
//! Enables the use of anonymous memory-mapped memory that is mapped into two buffers with different pointers.
//! The first buffer has read and execute permissions and the second buffer has read+write permissions.
//!
//! See \ref VirtMem::allocDualMapping() for more details about this feature.
kUseDualMapping = 0x00000001u,
//! Enables the use of multiple pools with increasing granularity instead of a single pool. This flag would enable
//! 3 internal pools in total having 64, 128, and 256 bytes granularity.
//!
//! This feature is only recommended for users that generate a lot of code and would like to minimize the overhead
//! of `JitAllocator` itself by having blocks of different allocation granularities. Using this feature for only a
//! few allocations won't pay off, as the allocator may need to create more blocks initially before it can take
//! advantage of variable block granularity.
kUseMultiplePools = 0x00000002u,
//! Always fill reserved memory by a fill-pattern.
//!
//! Causes a new block to be cleared by the fill pattern and freshly released memory to be cleared before making
//! it ready for another use.
kFillUnusedMemory = 0x00000004u,
//! When this flag is set the allocator would immediately release unused blocks during `release()` or `reset()`.
//! When this flag is not set the allocator would keep one empty block in each pool to prevent excessive virtual
//! memory allocations and deallocations in border cases, which involve constantly allocating and deallocating a
//! single block caused by repetitively calling `alloc()` and `release()` when the allocator has either no blocks
//! or all blocks fully occupied.
kImmediateRelease = 0x00000008u,
//! Use a custom fill pattern, must be combined with `kFillUnusedMemory`.
kCustomFillPattern = 0x10000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(JitAllocatorOptions)
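As a minimal sketch (using only CreateParams and hasOption() declared later in this header), the flags compose through the operators generated by ASMJIT_DEFINE_ENUM_FLAGS; the particular option set below is illustrative only:

// Sketch: configuring a JitAllocator with the strong-typed option flags.
JitAllocator::CreateParams params {};
params.options = JitAllocatorOptions::kUseDualMapping
               | JitAllocatorOptions::kFillUnusedMemory;

JitAllocator allocator(&params);

// hasOption() tests a single flag of the strong-typed enum.
bool usesDualMapping = allocator.hasOption(JitAllocatorOptions::kUseDualMapping);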
//! A simple implementation of memory manager that uses `asmjit::VirtMem`
//! functions to manage virtual memory for JIT compiled code.
//!
//! Implementation notes:
//!
//! - Granularity of allocated blocks is different than granularity for a typical
//! C malloc. In addition, the allocator can use several memory pools having a
//! different granularity to minimize the maintenance overhead. Multiple pools
//! - Granularity of allocated blocks is different than granularity for a typical C malloc. In addition, the allocator
//! can use several memory pools having a different granularity to minimize the maintenance overhead. Multiple pools
//!   feature requires `kUseMultiplePools` flag to be set.
//!
//! - The allocator doesn't store any information in executable memory, instead,
//! the implementation uses two bit-vectors to manage allocated memory of each
//! allocator-block. The first bit-vector called 'used' is used to track used
//! memory (where each bit represents memory size defined by granularity) and
//! the second bit vector called 'stop' is used as a sentinel to mark where
//! the allocated area ends.
//! - The allocator doesn't store any information in executable memory, instead, the implementation uses two
//! bit-vectors to manage allocated memory of each allocator-block. The first bit-vector called 'used' is used to
//! track used memory (where each bit represents memory size defined by granularity) and the second bit vector called
//! 'stop' is used as a sentinel to mark where the allocated area ends.
//!
//! - Internally, the allocator also uses RB tree to keep track of all blocks
//! across all pools. Each inserted block is added to the tree so it can be
//! matched fast during `release()` and `shrink()`.
//! - Internally, the allocator also uses RB tree to keep track of all blocks across all pools. Each inserted block is
//! added to the tree so it can be matched fast during `release()` and `shrink()`.
class JitAllocator {
public:
ASMJIT_NONCOPYABLE(JitAllocator)
struct Impl {
//! Allocator options, see \ref JitAllocator::Options.
uint32_t options;
//! Allocator options.
JitAllocatorOptions options;
//! Base block size (0 if the allocator is not initialized).
uint32_t blockSize;
//! Base granularity (0 if the allocator is not initialized).
@@ -77,45 +89,6 @@ public:
//! Allocator implementation (private).
Impl* _impl;
enum Options : uint32_t {
//! Enables the use of an anonymous memory-mapped memory that is mapped into
//! two buffers having a different pointer. The first buffer has read and
//! execute permissions and the second buffer has read+write permissions.
//!
//! See \ref VirtMem::allocDualMapping() for more details about this feature.
kOptionUseDualMapping = 0x00000001u,
//! Enables the use of multiple pools with increasing granularity instead of
//! a single pool. This flag would enable 3 internal pools in total having
//! 64, 128, and 256 bytes granularity.
//!
//! This feature is only recommended for users that generate a lot of code
//! and would like to minimize the overhead of `JitAllocator` itself by
//! having blocks of different allocation granularities. Using this feature
//! only for few allocations won't pay off as the allocator may need to
//! create more blocks initially before it can take the advantage of
//! variable block granularity.
kOptionUseMultiplePools = 0x00000002u,
//! Always fill reserved memory by a fill-pattern.
//!
//! Causes a new block to be cleared by the fill pattern and freshly
//! released memory to be cleared before making it ready for another use.
kOptionFillUnusedMemory = 0x00000004u,
//! When this flag is set the allocator would immediately release unused
//! blocks during `release()` or `reset()`. When this flag is not set the
//! allocator would keep one empty block in each pool to prevent excessive
//! virtual memory allocations and deallocations in border cases, which
//! involve constantly allocating and deallocating a single block caused
//! by repetitive calling `alloc()` and `release()` when the allocator has
//! either no blocks or have all blocks fully occupied.
kOptionImmediateRelease = 0x00000008u,
//! Use a custom fill pattern, must be combined with `kFlagFillUnusedMemory`.
kOptionCustomFillPattern = 0x10000000u
};
//! \name Construction & Destruction
//! \{
@@ -132,39 +105,35 @@ public:
//! JitAllocator allocator(&params);
//! ```
struct CreateParams {
//! Allocator options, see \ref JitAllocator::Options.
//! Allocator options.
//!
//! No options are used by default.
uint32_t options;
JitAllocatorOptions options = JitAllocatorOptions::kNone;
//! Base size of a single block in bytes (default 64kB).
//!
//! \remarks Block size must be equal or greater to page size and must be
//! power of 2. If the input is not valid then the default block size will
//! be used instead.
uint32_t blockSize;
//! \remarks Block size must be equal to or greater than the page size and must be a power of 2. If the input is not
//! valid then the default block size will be used instead.
uint32_t blockSize = 0;
//! Base granularity (and also natural alignment) of allocations in bytes
//! (default 64).
//! Base granularity (and also natural alignment) of allocations in bytes (default 64).
//!
//! Since the `JitAllocator` uses bit-arrays to mark used memory the
//! granularity also specifies how many bytes correspond to a single bit in
//! such bit-array. Higher granularity means more waste of virtual memory
//! (as it increases the natural alignment), but smaller bit-arrays as less
//! bits would be required per a single block.
uint32_t granularity;
//! Since the `JitAllocator` uses bit-arrays to mark used memory the granularity also specifies how many bytes
//! correspond to a single bit in such bit-array. Higher granularity means more waste of virtual memory (as it
//!   increases the natural alignment), but smaller bit-arrays as fewer bits are required per block.
uint32_t granularity = 0;
//! Pattern to use to fill unused memory.
//!
//! Only used if \ref kOptionCustomFillPattern is set.
uint32_t fillPattern;
//! Only used if \ref JitAllocatorOptions::kCustomFillPattern is set.
uint32_t fillPattern = 0;
// Reset the content of `CreateParams`.
inline void reset() noexcept { memset(this, 0, sizeof(*this)); }
};
//! Creates a `JitAllocator` instance.
explicit ASMJIT_API JitAllocator(const CreateParams* params = nullptr) noexcept;
ASMJIT_API explicit JitAllocator(const CreateParams* params = nullptr) noexcept;
//! Destroys the `JitAllocator` instance and release all blocks held.
ASMJIT_API ~JitAllocator() noexcept;
@@ -172,10 +141,9 @@ public:
//! Free all allocated memory - makes all pointers returned by `alloc()` invalid.
//!
//! \remarks This function is not thread-safe as it's designed to be used when
//! nobody else is using allocator. The reason is that there is no point of
//! calling `reset()` when the allocator is still in use.
ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
//! \remarks This function is not thread-safe as it's designed to be used when nobody else is using allocator.
//! The reason is that there is no point of calling `reset()` when the allocator is still in use.
ASMJIT_API void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept;
//! \}
@@ -183,9 +151,9 @@ public:
//! \{
//! Returns allocator options, see `Flags`.
inline uint32_t options() const noexcept { return _impl->options; }
inline JitAllocatorOptions options() const noexcept { return _impl->options; }
//! Tests whether the allocator has the given `option` set.
inline bool hasOption(uint32_t option) const noexcept { return (_impl->options & option) != 0; }
inline bool hasOption(JitAllocatorOptions option) const noexcept { return uint32_t(_impl->options & option) != 0; }
//! Returns a base block size (a minimum size of block that the allocator would allocate).
inline uint32_t blockSize() const noexcept { return _impl->blockSize; }
@@ -199,20 +167,31 @@ public:
//! \name Alloc & Release
//! \{
//! Allocate `size` bytes of virtual memory.
//! Allocates a new memory block of the requested `size`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error alloc(void** roPtrOut, void** rwPtrOut, size_t size) noexcept;
//! When the function is successful it stores two pointers in `rxPtrOut` and `rwPtrOut`. The pointers will be
//! different only if `kUseDualMapping` was used to set up the allocator (in that case `rxPtrOut` would point to
//! a Read+Execute region and `rwPtrOut` would point to a Read+Write region of the same memory-mapped block).
ASMJIT_API Error alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcept;
//! Release a memory returned by `alloc()`.
//! Releases a memory block returned by `alloc()`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error release(void* roPtr) noexcept;
ASMJIT_API Error release(void* rxPtr) noexcept;
//! Free extra memory allocated with `p` by restricting it to `newSize` size.
//! Frees extra memory allocated with `rxPtr` by shrinking it to the given `newSize`.
//!
//! \remarks This function is thread-safe.
ASMJIT_API Error shrink(void* roPtr, size_t newSize) noexcept;
ASMJIT_API Error shrink(void* rxPtr, size_t newSize) noexcept;
//! Queries information about an allocated memory block that contains the given `rxPtr`.
//!
//! The function returns `kErrorOk` when `rxPtr` is matched and fills `rxPtrOut`, `rwPtrOut`, and `sizeOut` output
//! arguments. The returned `rxPtrOut` and `rwPtrOut` pointers point to the beginning of the block, and `sizeOut`
//! describes the total amount of bytes this allocation uses - `sizeOut` will always be aligned to the allocation
//! granularity, so for example if an allocation was 1 byte and the size granularity is 64, the returned `sizeOut`
//! will be 64 bytes, because that's what the allocator sees.
ASMJIT_API Error query(void* rxPtr, void** rxPtrOut, void** rwPtrOut, size_t* sizeOut) const noexcept;
//! \}
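A hedged usage sketch of the alloc/release pair; the helper name, `codeBytes`, and `codeSize` are placeholders for already-encoded machine code, and the read-write scope mirrors what JitRuntime::_add does elsewhere in this change:

static Error copyAndRun(JitAllocator& allocator, const uint8_t* codeBytes, size_t codeSize) noexcept {
  void* rxPtr;
  void* rwPtr;
  ASMJIT_PROPAGATE(allocator.alloc(&rxPtr, &rwPtr, codeSize));

  {
    // Make the span writable for the copy (scope-based protection, as used by JitRuntime::_add).
    VirtMem::ProtectJitReadWriteScope scope(rxPtr, codeSize);
    memcpy(rwPtr, codeBytes, codeSize);
  }
  VirtMem::flushInstructionCache(rxPtr, codeSize);

  // ... cast rxPtr to a function pointer and call it here ...

  return allocator.release(rxPtr);
}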
@@ -223,6 +202,8 @@ public:
struct Statistics {
//! Number of blocks `JitAllocator` maintains.
size_t _blockCount;
//! Number of active allocations.
size_t _allocationCount;
//! How many bytes are currently used / allocated.
size_t _usedSize;
//! How many bytes are currently reserved by the allocator.
@@ -239,6 +220,8 @@ public:
//! Returns count of blocks managed by `JitAllocator` at the moment.
inline size_t blockCount() const noexcept { return _blockCount; }
//! Returns the number of active allocations.
inline size_t allocationCount() const noexcept { return _allocationCount; }
//! Returns how many bytes are currently used.
inline size_t usedSize() const noexcept { return _usedSize; }
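For illustration, a statistics snapshot can be read through the accessors above (a fragment; `allocator` is assumed to be an initialized JitAllocator and <cstdio> is assumed to be included):

JitAllocator::Statistics stats = allocator.statistics();
printf("blocks=%zu allocations=%zu used=%zu bytes\n",
       stats.blockCount(), stats.allocationCount(), stats.usedSize());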

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_JIT
@@ -29,44 +11,14 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::JitRuntime - Utilities]
// ============================================================================
// Only useful on non-x86 architectures.
static inline void JitRuntime_flushInstructionCache(const void* p, size_t size) noexcept {
#if ASMJIT_ARCH_X86
DebugUtils::unused(p, size);
#else
# if defined(_WIN32)
// Windows has a built-in support in `kernel32.dll`.
::FlushInstructionCache(::GetCurrentProcess(), p, size);
# elif defined(__GNUC__)
char* start = static_cast<char*>(const_cast<void*>(p));
char* end = start + size;
__builtin___clear_cache(start, end);
# else
DebugUtils::unused(p, size);
# endif
#endif
}
// ============================================================================
// [asmjit::JitRuntime - Construction / Destruction]
// ============================================================================
JitRuntime::JitRuntime(const JitAllocator::CreateParams* params) noexcept
: _allocator(params) {
_environment = hostEnvironment();
_environment.setFormat(Environment::kFormatJIT);
_environment = Environment::host();
_environment.setObjectFormat(ObjectFormat::kJIT);
}
JitRuntime::~JitRuntime() noexcept {}
// ============================================================================
// [asmjit::JitRuntime - Interface]
// ============================================================================
Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
*dst = nullptr;
@@ -77,14 +29,14 @@ Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
if (ASMJIT_UNLIKELY(estimatedCodeSize == 0))
return DebugUtils::errored(kErrorNoCodeGenerated);
uint8_t* ro;
uint8_t* rx;
uint8_t* rw;
ASMJIT_PROPAGATE(_allocator.alloc((void**)&ro, (void**)&rw, estimatedCodeSize));
ASMJIT_PROPAGATE(_allocator.alloc((void**)&rx, (void**)&rw, estimatedCodeSize));
// Relocate the code.
Error err = code->relocateToBase(uintptr_t((void*)ro));
Error err = code->relocateToBase(uintptr_t((void*)rx));
if (ASMJIT_UNLIKELY(err)) {
_allocator.release(ro);
_allocator.release(rx);
return err;
}
@@ -92,26 +44,28 @@ Error JitRuntime::_add(void** dst, CodeHolder* code) noexcept {
// in case that some relocations didn't require records in an address table.
size_t codeSize = code->codeSize();
for (Section* section : code->_sections) {
size_t offset = size_t(section->offset());
size_t bufferSize = size_t(section->bufferSize());
size_t virtualSize = size_t(section->virtualSize());
if (codeSize < estimatedCodeSize)
_allocator.shrink(rx, codeSize);
ASMJIT_ASSERT(offset + bufferSize <= codeSize);
memcpy(rw + offset, section->data(), bufferSize);
{
VirtMem::ProtectJitReadWriteScope rwScope(rx, codeSize);
if (virtualSize > bufferSize) {
ASMJIT_ASSERT(offset + virtualSize <= codeSize);
memset(rw + offset + bufferSize, 0, virtualSize - bufferSize);
for (Section* section : code->_sections) {
size_t offset = size_t(section->offset());
size_t bufferSize = size_t(section->bufferSize());
size_t virtualSize = size_t(section->virtualSize());
ASMJIT_ASSERT(offset + bufferSize <= codeSize);
memcpy(rw + offset, section->data(), bufferSize);
if (virtualSize > bufferSize) {
ASMJIT_ASSERT(offset + virtualSize <= codeSize);
memset(rw + offset + bufferSize, 0, virtualSize - bufferSize);
}
}
}
if (codeSize < estimatedCodeSize)
_allocator.shrink(ro, codeSize);
flush(ro, codeSize);
*dst = ro;
*dst = rx;
return kErrorOk;
}
@@ -119,10 +73,6 @@ Error JitRuntime::_release(void* p) noexcept {
return _allocator.release(p);
}
void JitRuntime::flush(const void* p, size_t size) noexcept {
JitRuntime_flushInstructionCache(p, size);
}
ASMJIT_END_NAMESPACE
#endif

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_JITRUNTIME_H_INCLUDED
#define ASMJIT_CORE_JITRUNTIME_H_INCLUDED
@@ -38,10 +20,6 @@ class CodeHolder;
//! \addtogroup asmjit_virtual_memory
//! \{
// ============================================================================
// [asmjit::JitRuntime]
// ============================================================================
//! JIT execution runtime is a special `Target` that is designed to store and
//! execute the generated code.
class ASMJIT_VIRTAPI JitRuntime : public Target {
@@ -55,11 +33,11 @@ public:
//! \{
//! Creates a `JitRuntime` instance.
explicit ASMJIT_API JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
ASMJIT_API explicit JitRuntime(const JitAllocator::CreateParams* params = nullptr) noexcept;
//! Destroys the `JitRuntime` instance.
ASMJIT_API virtual ~JitRuntime() noexcept;
inline void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept {
inline void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept {
_allocator.reset(resetPolicy);
}
@@ -79,12 +57,10 @@ public:
// NOTE: To allow passing function pointers to `add()` and `release()` the
// virtual methods are prefixed with `_` and called from templates instead.
//! Allocates memory needed for a code stored in the `CodeHolder` and relocates
//! the code to the pointer allocated.
//! Allocates memory needed for a code stored in the `CodeHolder` and relocates the code to the pointer allocated.
//!
//! The beginning of the memory allocated for the function is returned in `dst`.
//! If failed `Error` code is returned and `dst` is explicitly set to `nullptr`
//! (this means that you don't have to set it to null before calling `add()`).
//! The beginning of the memory allocated for the function is returned in `dst`. On failure an `Error` code is returned
//! and `dst` is explicitly set to `nullptr` (this means that you don't have to set it to null before calling `add()`).
template<typename Func>
inline Error add(Func* dst, CodeHolder* code) noexcept {
return _add(Support::ptr_cast_impl<void**, Func*>(dst), code);
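For context, a typical add()/release() round-trip looks roughly like the sketch below; the CodeHolder and x86::Assembler setup follows the usual AsmJit pattern and is not part of this diff:

JitRuntime rt;
CodeHolder code;
code.init(rt.environment());            // Match the runtime's host environment.

x86::Assembler a(&code);
a.mov(x86::eax, 42);
a.ret();

typedef int (*Func)(void);
Func fn;
Error err = rt.add(&fn, &code);         // Copies and relocates the code into executable memory.
if (err == kErrorOk) {
  int result = fn();                    // result == 42
  rt.release(fn);                       // Frees the executable memory held by `fn`.
}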
@@ -102,19 +78,6 @@ public:
//! Type-unsafe version of `release()`.
ASMJIT_API virtual Error _release(void* p) noexcept;
//! Flushes an instruction cache.
//!
//! This member function is called after the code has been copied to the
//! destination buffer. It is only useful for JIT code generation as it
//! causes a flush of the processor's cache.
//!
//! Flushing is basically a NOP under X86, but is needed by architectures
//! that do not have a transparent instruction cache like ARM.
//!
//! This function can also be overridden to improve compatibility with tools
//! such as Valgrind, however, it's not an official part of AsmJit.
ASMJIT_API virtual void flush(const void* p, size_t size) noexcept;
//! \}
};

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_LOGGING
@@ -30,18 +12,13 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Logger - Construction / Destruction]
// ============================================================================
// Logger - Implementation
// =======================
Logger::Logger() noexcept
: _options() {}
Logger::~Logger() noexcept {}
// ============================================================================
// [asmjit::Logger - Logging]
// ============================================================================
Error Logger::logf(const char* fmt, ...) noexcept {
Error err;
va_list ap;
@@ -59,18 +36,13 @@ Error Logger::logv(const char* fmt, va_list ap) noexcept {
return log(sb);
}
// ============================================================================
// [asmjit::FileLogger - Construction / Destruction]
// ============================================================================
// FileLogger - Implementation
// ===========================
FileLogger::FileLogger(FILE* file) noexcept
: _file(file) {}
FileLogger::~FileLogger() noexcept {}
// ============================================================================
// [asmjit::FileLogger - Logging]
// ============================================================================
Error FileLogger::_log(const char* data, size_t size) noexcept {
if (!_file)
return kErrorOk;
@@ -82,17 +54,12 @@ Error FileLogger::_log(const char* data, size_t size) noexcept {
return kErrorOk;
}
// ============================================================================
// [asmjit::StringLogger - Construction / Destruction]
// ============================================================================
// StringLogger - Implementation
// =============================
StringLogger::StringLogger() noexcept {}
StringLogger::~StringLogger() noexcept {}
// ============================================================================
// [asmjit::StringLogger - Logging]
// ============================================================================
Error StringLogger::_log(const char* data, size_t size) noexcept {
return _content.append(data, size);
}

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_LOGGING_H_INCLUDED
#define ASMJIT_CORE_LOGGING_H_INCLUDED
@@ -35,15 +17,10 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_logging
//! \{
// ============================================================================
// [asmjit::Logger]
// ============================================================================
//! Logging interface.
//!
//! This class can be inherited and reimplemented to fit into your own logging
//! needs. When reimplementing a logger use \ref Logger::_log() method to log
//! customize the output.
//! This class can be inherited and reimplemented to fit into your own logging needs. When reimplementing a logger
//! use the \ref Logger::_log() method to customize the output.
//!
//! There are two `Logger` implementations offered by AsmJit:
//! - \ref FileLogger - logs into a `FILE*`.
@@ -73,25 +50,35 @@ public:
inline FormatOptions& options() noexcept { return _options; }
//! \overload
inline const FormatOptions& options() const noexcept { return _options; }
//! Sets formatting options of this Logger to `options`.
inline void setOptions(const FormatOptions& options) noexcept { _options = options; }
//! Resets formatting options of this Logger to defaults.
inline void resetOptions() noexcept { _options.reset(); }
//! Returns formatting flags, see \ref FormatOptions::Flags.
inline uint32_t flags() const noexcept { return _options.flags(); }
//! Returns formatting flags.
inline FormatFlags flags() const noexcept { return _options.flags(); }
//! Tests whether the logger has the given `flag` enabled.
inline bool hasFlag(uint32_t flag) const noexcept { return _options.hasFlag(flag); }
//! Sets formatting flags to `flags`, see \ref FormatOptions::Flags.
inline void setFlags(uint32_t flags) noexcept { _options.setFlags(flags); }
//! Enables the given formatting `flags`, see \ref FormatOptions::Flags.
inline void addFlags(uint32_t flags) noexcept { _options.addFlags(flags); }
//! Disables the given formatting `flags`, see \ref FormatOptions::Flags.
inline void clearFlags(uint32_t flags) noexcept { _options.clearFlags(flags); }
inline bool hasFlag(FormatFlags flag) const noexcept { return _options.hasFlag(flag); }
//! Sets formatting flags to `flags`.
inline void setFlags(FormatFlags flags) noexcept { _options.setFlags(flags); }
//! Enables the given formatting `flags`.
inline void addFlags(FormatFlags flags) noexcept { _options.addFlags(flags); }
//! Disables the given formatting `flags`.
inline void clearFlags(FormatFlags flags) noexcept { _options.clearFlags(flags); }
//! Returns indentation of `type`, see \ref FormatOptions::IndentationType.
inline uint32_t indentation(uint32_t type) const noexcept { return _options.indentation(type); }
//! Sets indentation of the given indentation `type` to `n` spaces, see \ref
//! FormatOptions::IndentationType.
inline void setIndentation(uint32_t type, uint32_t n) noexcept { _options.setIndentation(type, n); }
//! Resets indentation of the given indentation `type` to 0 spaces.
inline void resetIndentation(uint32_t type) noexcept { _options.resetIndentation(type); }
//! Returns indentation of a given indentation `group`.
inline uint32_t indentation(FormatIndentationGroup type) const noexcept { return _options.indentation(type); }
//! Sets indentation of the given indentation `group` to `n` spaces.
inline void setIndentation(FormatIndentationGroup type, uint32_t n) noexcept { _options.setIndentation(type, n); }
//! Resets indentation of the given indentation `group` to 0 spaces.
inline void resetIndentation(FormatIndentationGroup type) noexcept { _options.resetIndentation(type); }
//! Returns padding of a given padding `group`.
inline size_t padding(FormatPaddingGroup type) const noexcept { return _options.padding(type); }
//! Sets padding of a given padding `group` to `n`.
inline void setPadding(FormatPaddingGroup type, uint32_t n) noexcept { _options.setPadding(type, n); }
//! Resets padding of a given padding `group` to 0, which means that a default will be used.
inline void resetPadding(FormatPaddingGroup type) noexcept { _options.resetPadding(type); }
//! \}
@@ -100,9 +87,8 @@ public:
//! Logs `str` - must be reimplemented.
//!
//! The function can accept either a null terminated string if `size` is
//! `SIZE_MAX` or a non-null terminated string of the given `size`. The
//! function cannot assume that the data is null terminated and must handle
//! The function can accept either a null terminated string if `size` is `SIZE_MAX` or a non-null terminated
//! string of the given `size`. The function cannot assume that the data is null terminated and must handle
//! non-null terminated inputs.
virtual Error _log(const char* data, size_t size) noexcept = 0;
@@ -111,21 +97,15 @@ public:
//! Logs content of a string `str`.
inline Error log(const String& str) noexcept { return _log(str.data(), str.size()); }
//! Formats the message by using `snprintf()` and then passes the formatted
//! string to \ref _log().
//! Formats the message by using `snprintf()` and then passes the formatted string to \ref _log().
ASMJIT_API Error logf(const char* fmt, ...) noexcept;
//! Formats the message by using `vsnprintf()` and then passes the formatted
//! string to \ref _log().
//! Formats the message by using `vsnprintf()` and then passes the formatted string to \ref _log().
ASMJIT_API Error logv(const char* fmt, va_list ap) noexcept;
//! \}
};
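A minimal sketch of the reimplementation contract described above; the class name and std::string buffer are illustrative, and append errors are ignored for brevity:

// Requires <string> and <cstring>.
class MyLogger : public Logger {
public:
  std::string buffer;

  Error _log(const char* data, size_t size) noexcept override {
    // `size` is SIZE_MAX for null-terminated input, otherwise an explicit length.
    buffer.append(data, size == SIZE_MAX ? strlen(data) : size);
    return kErrorOk;
  }
};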
// ============================================================================
// [asmjit::FileLogger]
// ============================================================================
//! Logger that can log to a `FILE*`.
class ASMJIT_VIRTAPI FileLogger : public Logger {
public:
@@ -146,17 +126,14 @@ public:
//! \name Accessors
//! \{
//! Returns the logging output stream or null if the logger has no output
//! stream.
//! Returns the logging output stream or null if the logger has no output stream.
inline FILE* file() const noexcept { return _file; }
//! Sets the logging output stream to `stream` or null.
//!
//! \note If the `file` is null the logging will be disabled. When a logger
//! is attached to `CodeHolder` or any emitter the logging API will always
//! be called regardless of the output file. This means that if you really
//! want to disable logging at emitter level you must not attach a logger
//! to it.
//! \note If the `file` is null the logging will be disabled. When a logger is attached to `CodeHolder` or any
//! emitter the logging API will always be called regardless of the output file. This means that if you really
//! want to disable logging at emitter level you must not attach a logger to it.
inline void setFile(FILE* file) noexcept { _file = file; }
//! \}
@@ -164,10 +141,6 @@ public:
ASMJIT_API Error _log(const char* data, size_t size = SIZE_MAX) noexcept override;
};
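A short sketch of attaching a FileLogger; CodeHolder::setLogger() and the FormatFlags value come from the wider AsmJit API and are assumed here rather than shown in this diff:

FileLogger logger(stdout);                  // Log to standard output.
logger.addFlags(FormatFlags::kMachineCode); // Also dump encoded bytes (assumed flag name).

CodeHolder code;
code.init(rt.environment());                // `rt` is an existing JitRuntime.
code.setLogger(&logger);                    // Emitters attached to `code` will now log.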
// ============================================================================
// [asmjit::StringLogger]
// ============================================================================
//! Logger that stores everything in an internal string buffer.
class ASMJIT_VIRTAPI StringLogger : public Logger {
public:

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_MISC_P_H_INCLUDED
#define ASMJIT_CORE_MISC_P_H_INCLUDED

View File

@@ -1,36 +1,22 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/operand.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Operand - Unit]
// ============================================================================
// Operand - Tests
// ===============
#if defined(ASMJIT_TEST)
enum class StrongEnumForImmTests : uint32_t {
kValue0,
kValue0xFFFFFFFF = 0xFFFFFFFFu
};
UNIT(operand) {
INFO("Checking operand sizes");
EXPECT(sizeof(Operand) == 16);
@@ -65,22 +51,23 @@ UNIT(operand) {
EXPECT(dummy.as<BaseReg>().isValid() == false);
// Create some register (not specific to any architecture).
uint32_t rSig = Operand::kOpReg | (1 << Operand::kSignatureRegTypeShift ) |
(2 << Operand::kSignatureRegGroupShift) |
(8 << Operand::kSignatureSizeShift ) ;
BaseReg r1 = BaseReg::fromSignatureAndId(rSig, 5);
OperandSignature rSig = OperandSignature::fromOpType(OperandType::kReg) |
OperandSignature::fromRegType(RegType::kVec128) |
OperandSignature::fromRegGroup(RegGroup::kVec) |
OperandSignature::fromSize(8);
BaseReg r1(rSig, 5);
EXPECT(r1.isValid() == true);
EXPECT(r1.isReg() == true);
EXPECT(r1.isReg(1) == true);
EXPECT(r1.isReg(RegType::kVec128) == true);
EXPECT(r1.isPhysReg() == true);
EXPECT(r1.isVirtReg() == false);
EXPECT(r1.signature() == rSig);
EXPECT(r1.type() == 1);
EXPECT(r1.group() == 2);
EXPECT(r1.type() == RegType::kVec128);
EXPECT(r1.group() == RegGroup::kVec);
EXPECT(r1.size() == 8);
EXPECT(r1.id() == 5);
EXPECT(r1.isReg(1, 5) == true); // RegType and Id.
EXPECT(r1.isReg(RegType::kVec128, 5) == true); // RegType and Id.
EXPECT(r1._data[0] == 0);
EXPECT(r1._data[1] == 0);
@@ -88,7 +75,7 @@ UNIT(operand) {
BaseReg r2(r1, 6);
EXPECT(r2.isValid() == true);
EXPECT(r2.isReg() == true);
EXPECT(r2.isReg(1) == true);
EXPECT(r2.isReg(RegType::kVec128) == true);
EXPECT(r2.isPhysReg() == true);
EXPECT(r2.isVirtReg() == false);
EXPECT(r2.signature() == rSig);
@@ -96,7 +83,7 @@ UNIT(operand) {
EXPECT(r2.group() == r1.group());
EXPECT(r2.size() == r1.size());
EXPECT(r2.id() == 6);
EXPECT(r2.isReg(1, 6) == true);
EXPECT(r2.isReg(RegType::kVec128, 6) == true);
r1.reset();
EXPECT(!r1.isReg());
@@ -126,17 +113,19 @@ UNIT(operand) {
INFO("Checking basic functionality of Imm");
Imm immValue(-42);
EXPECT(immValue.type() == Imm::kTypeInteger);
EXPECT(immValue.type() == ImmType::kInt);
EXPECT(Imm(-1).value() == -1);
EXPECT(imm(-1).value() == -1);
EXPECT(immValue.value() == -42);
EXPECT(imm(0xFFFFFFFF).value() == int64_t(0xFFFFFFFF));
Imm immDouble(0.4);
EXPECT(immDouble.type() == Imm::kTypeDouble);
EXPECT(immDouble.type() == ImmType::kDouble);
EXPECT(immDouble.valueAs<double>() == 0.4);
EXPECT(immDouble == imm(0.4));
EXPECT(Imm(StrongEnumForImmTests::kValue0).value() == 0);
EXPECT(Imm(StrongEnumForImmTests::kValue0xFFFFFFFF).value() == 0xFFFFFFFFu);
}
#endif

File diff suppressed because it is too large

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/osutils.h"
@@ -36,10 +18,6 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::OSUtils - GetTickCount]
// ============================================================================
uint32_t OSUtils::getTickCount() noexcept {
#if defined(_WIN32)
enum HiResStatus : uint32_t {

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_OSUTILS_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_H_INCLUDED
@@ -31,22 +13,14 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::OSUtils]
// ============================================================================
//! Operating system utilities.
namespace OSUtils {
//! Gets the current CPU tick count, used for benchmarking (1ms resolution).
ASMJIT_API uint32_t getTickCount() noexcept;
};
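A small sketch of the intended benchmarking use (the workload function is hypothetical and <cstdio> is assumed):

uint32_t start = OSUtils::getTickCount();
runBenchmarkedWork();                                  // Hypothetical workload.
uint32_t elapsedMs = OSUtils::getTickCount() - start;  // Wraps safely with unsigned arithmetic.
printf("elapsed: %u ms\n", elapsedMs);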
// ============================================================================
// [asmjit::Lock]
// ============================================================================
//! \cond INTERNAL
//! Lock.
//!
//! Lock is internal, it cannot be used outside of AsmJit, however, its internal
@@ -72,11 +46,11 @@ public:
Handle _handle;
#endif
inline Lock() noexcept;
inline ~Lock() noexcept;
ASMJIT_FORCE_INLINE Lock() noexcept;
ASMJIT_FORCE_INLINE ~Lock() noexcept;
inline void lock() noexcept;
inline void unlock() noexcept;
ASMJIT_FORCE_INLINE void lock() noexcept;
ASMJIT_FORCE_INLINE void unlock() noexcept;
};
//! \endcond

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_OSUTILS_P_H_INCLUDED
#define ASMJIT_CORE_OSUTILS_P_H_INCLUDED
@@ -32,47 +14,39 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::Lock]
// ============================================================================
#if defined(_WIN32)
// Windows implementation.
static_assert(sizeof(Lock::Handle) == sizeof(CRITICAL_SECTION), "asmjit::Lock::Handle layout must match CRITICAL_SECTION");
static_assert(alignof(Lock::Handle) == alignof(CRITICAL_SECTION), "asmjit::Lock::Handle alignment must match CRITICAL_SECTION");
inline Lock::Lock() noexcept { InitializeCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline Lock::~Lock() noexcept { DeleteCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline void Lock::lock() noexcept { EnterCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
inline void Lock::unlock() noexcept { LeaveCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_FORCE_INLINE Lock::Lock() noexcept { InitializeCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_FORCE_INLINE Lock::~Lock() noexcept { DeleteCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_FORCE_INLINE void Lock::lock() noexcept { EnterCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
ASMJIT_FORCE_INLINE void Lock::unlock() noexcept { LeaveCriticalSection(reinterpret_cast<CRITICAL_SECTION*>(&_handle)); }
#elif !defined(__EMSCRIPTEN__)
// PThread implementation.
#ifdef PTHREAD_MUTEX_INITIALIZER
inline Lock::Lock() noexcept : _handle(PTHREAD_MUTEX_INITIALIZER) {}
ASMJIT_FORCE_INLINE Lock::Lock() noexcept : _handle(PTHREAD_MUTEX_INITIALIZER) {}
#else
inline Lock::Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
ASMJIT_FORCE_INLINE Lock::Lock() noexcept { pthread_mutex_init(&_handle, nullptr); }
#endif
inline Lock::~Lock() noexcept { pthread_mutex_destroy(&_handle); }
inline void Lock::lock() noexcept { pthread_mutex_lock(&_handle); }
inline void Lock::unlock() noexcept { pthread_mutex_unlock(&_handle); }
ASMJIT_FORCE_INLINE Lock::~Lock() noexcept { pthread_mutex_destroy(&_handle); }
ASMJIT_FORCE_INLINE void Lock::lock() noexcept { pthread_mutex_lock(&_handle); }
ASMJIT_FORCE_INLINE void Lock::unlock() noexcept { pthread_mutex_unlock(&_handle); }
#else
// Dummy implementation - Emscripten or other unsupported platform.
inline Lock::Lock() noexcept {}
inline Lock::~Lock() noexcept {}
inline void Lock::lock() noexcept {}
inline void Lock::unlock() noexcept {}
ASMJIT_FORCE_INLINE Lock::Lock() noexcept {}
ASMJIT_FORCE_INLINE Lock::~Lock() noexcept {}
ASMJIT_FORCE_INLINE void Lock::lock() noexcept {}
ASMJIT_FORCE_INLINE void Lock::unlock() noexcept {}
#endif
// ============================================================================
// [asmjit::LockGuard]
// ============================================================================
//! Scoped lock.
class LockGuard {
public:

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
#define ASMJIT_CORE_RAASSIGNMENT_P_H_INCLUDED
@@ -35,10 +17,12 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RAAssignment]
// ============================================================================
//! Holds the current register assignment.
//!
//! Has two purposes:
//!
//! 1. Holds register assignment of a local register allocator (see \ref RALocalAllocator).
//! 2. Holds register assignment of the entry of basic blocks (see \ref RABlock).
class RAAssignment {
public:
ASMJIT_NONCOPYABLE(RAAssignment)
@@ -120,6 +104,9 @@ public:
}
};
//! \name Members
//! \{
//! Physical registers layout.
Layout _layout;
//! WorkReg to PhysReg mapping.
@@ -127,7 +114,9 @@ public:
//! PhysReg to WorkReg mapping and assigned/dirty bits.
PhysToWorkMap* _physToWorkMap;
//! Optimization to translate PhysRegs to WorkRegs faster.
uint32_t* _physToWorkIds[BaseReg::kGroupVirt];
Support::Array<uint32_t*, Globals::kNumVirtGroups> _physToWorkIds;
//! \}
//! \name Construction & Destruction
//! \{
@@ -137,31 +126,30 @@ public:
resetMaps();
}
inline void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
ASMJIT_FORCE_INLINE void initLayout(const RARegCount& physCount, const RAWorkRegs& workRegs) noexcept {
// Layout must be initialized before data.
ASMJIT_ASSERT(_physToWorkMap == nullptr);
ASMJIT_ASSERT(_workToPhysMap == nullptr);
_layout.physIndex.buildIndexes(physCount);
_layout.physCount = physCount;
_layout.physTotal = uint32_t(_layout.physIndex[BaseReg::kGroupVirt - 1]) +
uint32_t(_layout.physCount[BaseReg::kGroupVirt - 1]) ;
_layout.physTotal = uint32_t(_layout.physIndex[RegGroup::kMaxVirt]) +
uint32_t(_layout.physCount[RegGroup::kMaxVirt]) ;
_layout.workCount = workRegs.size();
_layout.workRegs = &workRegs;
}
inline void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
ASMJIT_FORCE_INLINE void initMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
_physToWorkMap = physToWorkMap;
_workToPhysMap = workToPhysMap;
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
for (RegGroup group : RegGroupVirtValues{})
_physToWorkIds[group] = physToWorkMap->workIds + _layout.physIndex.get(group);
}
inline void resetMaps() noexcept {
ASMJIT_FORCE_INLINE void resetMaps() noexcept {
_physToWorkMap = nullptr;
_workToPhysMap = nullptr;
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
_physToWorkIds[group] = nullptr;
_physToWorkIds.fill(nullptr);
}
//! \}
@@ -174,30 +162,30 @@ public:
inline RARegMask& assigned() noexcept { return _physToWorkMap->assigned; }
inline const RARegMask& assigned() const noexcept { return _physToWorkMap->assigned; }
inline uint32_t assigned(uint32_t group) const noexcept { return _physToWorkMap->assigned[group]; }
inline uint32_t assigned(RegGroup group) const noexcept { return _physToWorkMap->assigned[group]; }
inline RARegMask& dirty() noexcept { return _physToWorkMap->dirty; }
inline const RARegMask& dirty() const noexcept { return _physToWorkMap->dirty; }
inline uint32_t dirty(uint32_t group) const noexcept { return _physToWorkMap->dirty[group]; }
inline RegMask dirty(RegGroup group) const noexcept { return _physToWorkMap->dirty[group]; }
inline uint32_t workToPhysId(uint32_t group, uint32_t workId) const noexcept {
inline uint32_t workToPhysId(RegGroup group, uint32_t workId) const noexcept {
DebugUtils::unused(group);
ASMJIT_ASSERT(workId != kWorkNone);
ASMJIT_ASSERT(workId < _layout.workCount);
return _workToPhysMap->physIds[workId];
}
inline uint32_t physToWorkId(uint32_t group, uint32_t physId) const noexcept {
inline uint32_t physToWorkId(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return _physToWorkIds[group][physId];
}
inline bool isPhysAssigned(uint32_t group, uint32_t physId) const noexcept {
inline bool isPhysAssigned(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->assigned[group], physId);
}
inline bool isPhysDirty(uint32_t group, uint32_t physId) const noexcept {
inline bool isPhysDirty(RegGroup group, uint32_t physId) const noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
return Support::bitTest(_physToWorkMap->dirty[group], physId);
}
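Both maps above describe the same assignment from opposite directions, so a consistent state always round-trips. A hypothetical helper (not part of AsmJit, assuming this private header is available) that spells out the invariant:
// Hypothetical consistency check: a workId assigned to physId must map back to
// the same workId, and the corresponding assigned bit must be set.
static inline bool raMappingIsConsistent(const RAAssignment& a, RegGroup group,
                                         uint32_t workId, uint32_t physId) noexcept {
  return a.workToPhysId(group, workId) == physId &&
         a.physToWorkId(group, physId) == workId &&
         a.isPhysAssigned(group, physId);
}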
@@ -205,15 +193,15 @@ public:
//! \}
//! \name Assignment
//!
//! These are low-level allocation helpers that are used to update the current mappings between physical and
//! virt/work registers and also to update masks that represent allocated and dirty registers. These functions
//! don't emit any code; they are only used to update and keep all mappings in sync.
//!
//! \{
// These are low-level allocation helpers that are used to update the current
// mappings between physical and virt/work registers and also to update masks
// that represent allocated and dirty registers. These functions don't emit
// any code; they are only used to update and keep all mappings in sync.
//! Assign [VirtReg/WorkReg] to a physical register.
ASMJIT_INLINE void assign(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
inline void assign(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
ASMJIT_ASSERT(workToPhysId(group, workId) == kPhysNone);
ASMJIT_ASSERT(physToWorkId(group, physId) == kWorkNone);
ASMJIT_ASSERT(!isPhysAssigned(group, physId));
@@ -222,15 +210,15 @@ public:
_workToPhysMap->physIds[workId] = uint8_t(physId);
_physToWorkIds[group][physId] = workId;
uint32_t regMask = Support::bitMask(physId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] |= regMask;
_physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool<uint32_t>(dirty);
_physToWorkMap->dirty[group] |= regMask & Support::bitMaskFromBool<RegMask>(dirty);
verify();
}
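The dirty update above is branchless; assuming Support::bitMaskFromBool() expands a bool into an all-ones or all-zero mask (which is what the bitwise AND relies on), it is equivalent to:
// Branchy equivalent of the dirty update above (illustrative only):
//   if (dirty)
//     _physToWorkMap->dirty[group] |= regMask;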
//! Reassign [VirtReg/WorkReg] to `dstPhysId` from `srcPhysId`.
ASMJIT_INLINE void reassign(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
inline void reassign(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
ASMJIT_ASSERT(dstPhysId != srcPhysId);
ASMJIT_ASSERT(workToPhysId(group, workId) == srcPhysId);
ASMJIT_ASSERT(physToWorkId(group, srcPhysId) == workId);
@@ -241,19 +229,19 @@ public:
_physToWorkIds[group][srcPhysId] = kWorkNone;
_physToWorkIds[group][dstPhysId] = workId;
uint32_t srcMask = Support::bitMask(srcPhysId);
uint32_t dstMask = Support::bitMask(dstPhysId);
RegMask srcMask = Support::bitMask(srcPhysId);
RegMask dstMask = Support::bitMask(dstPhysId);
uint32_t dirty = (_physToWorkMap->dirty[group] & srcMask) != 0;
uint32_t regMask = dstMask | srcMask;
bool dirty = (_physToWorkMap->dirty[group] & srcMask) != 0;
RegMask regMask = dstMask | srcMask;
_physToWorkMap->assigned[group] ^= regMask;
_physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool<uint32_t>(dirty);
_physToWorkMap->dirty[group] ^= regMask & Support::bitMaskFromBool<RegMask>(dirty);
verify();
}
ASMJIT_INLINE void swap(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
inline void swap(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
ASMJIT_ASSERT(aPhysId != bPhysId);
ASMJIT_ASSERT(workToPhysId(group, aWorkId) == aPhysId);
ASMJIT_ASSERT(workToPhysId(group, bWorkId) == bPhysId);
@@ -267,21 +255,17 @@ public:
_physToWorkIds[group][aPhysId] = bWorkId;
_physToWorkIds[group][bPhysId] = aWorkId;
uint32_t aMask = Support::bitMask(aPhysId);
uint32_t bMask = Support::bitMask(bPhysId);
uint32_t flipMask = Support::bitMaskFromBool<uint32_t>(
((_physToWorkMap->dirty[group] & aMask) != 0) ^
((_physToWorkMap->dirty[group] & bMask) != 0));
uint32_t regMask = aMask | bMask;
RegMask aMask = Support::bitMask(aPhysId);
RegMask bMask = Support::bitMask(bPhysId);
RegMask flipMask = Support::bitMaskFromBool<RegMask>(((_physToWorkMap->dirty[group] & aMask) != 0) ^ ((_physToWorkMap->dirty[group] & bMask) != 0));
RegMask regMask = aMask | bMask;
_physToWorkMap->dirty[group] ^= regMask & flipMask;
verify();
}
//! Unassign [VirtReg/WorkReg] from a physical register.
ASMJIT_INLINE void unassign(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline void unassign(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(physId < Globals::kMaxPhysRegs);
ASMJIT_ASSERT(workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(physToWorkId(group, physId) == workId);
@@ -290,22 +274,22 @@ public:
_workToPhysMap->physIds[workId] = kPhysNone;
_physToWorkIds[group][physId] = kWorkNone;
uint32_t regMask = Support::bitMask(physId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->assigned[group] &= ~regMask;
_physToWorkMap->dirty[group] &= ~regMask;
verify();
}
inline void makeClean(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline void makeClean(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
uint32_t regMask = Support::bitMask(physId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] &= ~regMask;
}
inline void makeDirty(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline void makeDirty(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
DebugUtils::unused(workId);
uint32_t regMask = Support::bitMask(physId);
RegMask regMask = Support::bitMask(physId);
_physToWorkMap->dirty[group] |= regMask;
}
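Taken together, the helpers above keep both maps and both bit masks in sync without emitting any code. A hypothetical call sequence (illustrative ids, GP group, assuming `a` was set up via initLayout() and initMaps() and the ids are currently unassigned):
// Hypothetical assignment lifecycle (not part of AsmJit) using the helpers above.
static void exampleAssignmentLifecycle(RAAssignment& a) noexcept {
  constexpr uint32_t workId = 0;  // some work/virtual register
  constexpr uint32_t physId = 2;  // some physical register id

  a.assign(RegGroup::kGp, workId, physId, false); // workId now lives in physId, clean.
  a.makeDirty(RegGroup::kGp, workId, physId);     // its value diverged from memory.
  a.makeClean(RegGroup::kGp, workId, physId);     // e.g. after a save/spill was emitted.
  a.unassign(RegGroup::kGp, workId, physId);      // physId is free again.
}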
@@ -314,12 +298,10 @@ public:
//! \name Utilities
//! \{
inline void swap(RAAssignment& other) noexcept {
ASMJIT_FORCE_INLINE void swap(RAAssignment& other) noexcept {
std::swap(_workToPhysMap, other._workToPhysMap);
std::swap(_physToWorkMap, other._physToWorkMap);
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++)
std::swap(_physToWorkIds[group], other._physToWorkIds[group]);
_physToWorkIds.swap(other._physToWorkIds);
}
inline void copyFrom(const PhysToWorkMap* physToWorkMap, const WorkToPhysMap* workToPhysMap) noexcept {
@@ -373,7 +355,7 @@ public:
uint32_t physId = _workToPhysMap->physIds[workId];
if (physId != kPhysNone) {
const RAWorkReg* workReg = _layout.workRegs->at(workId);
uint32_t group = workReg->group();
RegGroup group = workReg->group();
ASMJIT_ASSERT(_physToWorkIds[group][physId] == workId);
}
}
@@ -381,7 +363,7 @@ public:
// Verify PhysToWorkMap.
{
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
for (RegGroup group : RegGroupVirtValues{}) {
uint32_t physCount = _layout.physCount[group];
for (uint32_t physId = 0; physId < physCount; physId++) {
uint32_t workId = _physToWorkIds[group][physId];

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
#define ASMJIT_CORE_RABUILDERS_P_H_INCLUDED
@@ -36,13 +18,22 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RACFGBuilderT]
// ============================================================================
template<typename This>
class RACFGBuilderT {
public:
enum : uint32_t {
kRootIndentation = 2,
kCodeIndentation = 4,
// NOTE: This is a bit hacky. There are some nodes which are processed twice (see `onBeforeInvoke()` and
// `onBeforeRet()`) as they can insert some nodes around them. Since we don't have any flags to mark these
// we just use their position that is [at that time] unassigned.
kNodePositionDidOnBefore = 0xFFFFFFFFu
};
//! \name Members
//! \{
BaseRAPass* _pass = nullptr;
BaseCompiler* _cc = nullptr;
RABlock* _curBlock = nullptr;
@@ -58,38 +49,30 @@ public:
#ifndef ASMJIT_NO_LOGGING
Logger* _logger = nullptr;
uint32_t _logFlags = FormatOptions::kFlagPositions;
FormatOptions _formatOptions {};
StringTmp<512> _sb;
#endif
static constexpr uint32_t kRootIndentation = 2;
static constexpr uint32_t kCodeIndentation = 4;
// NOTE: This is a bit hacky. There are some nodes which are processed twice
// (see `onBeforeInvoke()` and `onBeforeRet()`) as they can insert some nodes
// around them. Since we don't have any flags to mark these we just use their
// position that is [at that time] unassigned.
static constexpr uint32_t kNodePositionDidOnBefore = 0xFFFFFFFFu;
//! \}
inline RACFGBuilderT(BaseRAPass* pass) noexcept
: _pass(pass),
_cc(pass->cc()) {
#ifndef ASMJIT_NO_LOGGING
_logger = _pass->debugLogger();
_logger = _pass->hasDiagnosticOption(DiagnosticOptions::kRADebugCFG) ? _pass->logger() : nullptr;
if (_logger)
_logFlags |= _logger->flags();
_formatOptions = _logger->options();
#endif
}
inline BaseCompiler* cc() const noexcept { return _cc; }
// --------------------------------------------------------------------------
// [Run]
// --------------------------------------------------------------------------
//! \name Run
//! \{
//! Called per function by an architecture-specific CFG builder.
Error run() noexcept {
log("[RAPass::BuildCFG]\n");
log("[BuildCFG]\n");
ASMJIT_PROPAGATE(prepare());
logNode(_funcNode, kRootIndentation);
@@ -114,12 +97,11 @@ public:
// Instruction | Jump | Invoke | Return
// ------------------------------------
// Handle `InstNode`, `InvokeNode`, and `FuncRetNode`. All of them
// share the same interface that provides operands that have read/write
// semantics.
// Handle `InstNode`, `InvokeNode`, and `FuncRetNode`. All of them share the same interface that provides
// operands that have read/write semantics.
if (ASMJIT_UNLIKELY(!_curBlock)) {
// Unreachable code has to be removed, we cannot allocate registers
// in such code as we cannot do proper liveness analysis in such case.
// Unreachable code has to be removed, we cannot allocate registers in such code as we cannot do proper
// liveness analysis in such case.
removeNode(node);
node = next;
continue;
@@ -129,15 +111,13 @@ public:
if (node->isInvoke() || node->isFuncRet()) {
if (node->position() != kNodePositionDidOnBefore) {
// Call and Reg are complicated as they may insert some surrounding
// code around them. The simplest approach is to get the previous
// node, call the `onBefore()` handlers and then check whether
// anything changed and restart if so. By restart we mean that the
// current `node` would go back to the first possible inserted node
// by `onBeforeInvoke()` or `onBeforeRet()`.
// Call and Reg are complicated as they may insert some surrounding code around them. The simplest
// approach is to get the previous node, call the `onBefore()` handlers and then check whether
// anything changed and restart if so. By restart we mean that the current `node` would go back to
// the first possible inserted node by `onBeforeInvoke()` or `onBeforeRet()`.
BaseNode* prev = node->prev();
if (node->type() == BaseNode::kNodeInvoke)
if (node->type() == NodeType::kInvoke)
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeInvoke(node->as<InvokeNode>()));
else
ASMJIT_PROPAGATE(static_cast<This*>(this)->onBeforeRet(node->as<FuncRetNode>()));
@@ -167,9 +147,9 @@ public:
InstNode* inst = node->as<InstNode>();
logNode(inst, kCodeIndentation);
uint32_t controlType = BaseInst::kControlNone;
InstControlFlow cf = InstControlFlow::kRegular;
ib.reset();
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, controlType, ib));
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInst(inst, cf, ib));
if (node->isInvoke()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onInvoke(inst->as<InvokeNode>(), ib));
@@ -177,20 +157,20 @@ public:
if (node->isFuncRet()) {
ASMJIT_PROPAGATE(static_cast<This*>(this)->onRet(inst->as<FuncRetNode>(), ib));
controlType = BaseInst::kControlReturn;
cf = InstControlFlow::kReturn;
}
if (controlType == BaseInst::kControlJump) {
if (cf == InstControlFlow::kJump) {
uint32_t fixedRegCount = 0;
for (RATiedReg& tiedReg : ib) {
RAWorkReg* workReg = _pass->workRegById(tiedReg.workId());
if (workReg->group() == BaseReg::kGroupGp) {
if (workReg->group() == RegGroup::kGp) {
uint32_t useId = tiedReg.useId();
if (useId == BaseReg::kIdBad) {
useId = _pass->_scratchRegIndexes[fixedRegCount++];
tiedReg.setUseId(useId);
}
_curBlock->addExitScratchGpRegs(Support::bitMask<uint32_t>(useId));
_curBlock->addExitScratchGpRegs(Support::bitMask(useId));
}
}
}
@@ -198,14 +178,14 @@ public:
ASMJIT_PROPAGATE(_pass->assignRAInst(inst, _curBlock, ib));
_blockRegStats.combineWith(ib._stats);
if (controlType != BaseInst::kControlNone) {
if (cf != InstControlFlow::kRegular) {
// Support for conditional and unconditional jumps.
if (controlType == BaseInst::kControlJump || controlType == BaseInst::kControlBranch) {
if (cf == InstControlFlow::kJump || cf == InstControlFlow::kBranch) {
_curBlock->setLast(node);
_curBlock->addFlags(RABlock::kFlagHasTerminator);
_curBlock->addFlags(RABlockFlags::kHasTerminator);
_curBlock->makeConstructed(_blockRegStats);
if (!(inst->instOptions() & BaseInst::kOptionUnfollow)) {
if (!inst->hasOption(InstOptions::kUnfollow)) {
// Jmp/Jcc/Call/Loop/etc...
uint32_t opCount = inst->opCount();
const Operand* opArray = inst->operands();
@@ -227,14 +207,13 @@ public:
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(targetBlock));
}
else {
// Not a label - could be jump with reg/mem operand, which
// means that it can go anywhere. Such jumps must either be
// annotated so the CFG can be properly constructed, otherwise
// we assume the worst case - can jump to any basic block.
// Not a label - could be jump with reg/mem operand, which means that it can go anywhere. Such jumps
// must either be annotated so the CFG can be properly constructed, otherwise we assume the worst case
// - can jump to any basic block.
JumpAnnotation* jumpAnnotation = nullptr;
_curBlock->addFlags(RABlock::kFlagHasJumpTable);
_curBlock->addFlags(RABlockFlags::kHasJumpTable);
if (inst->type() == BaseNode::kNodeJump)
if (inst->type() == NodeType::kJump)
jumpAnnotation = inst->as<JumpNode>()->annotation();
if (jumpAnnotation) {
@@ -262,13 +241,11 @@ public:
}
}
if (controlType == BaseInst::kControlJump) {
// Unconditional jump makes the code after the jump unreachable,
// which will be removed instantly during the CFG construction;
// as we cannot allocate registers for instructions that are not
// part of any block. Of course we can leave these instructions
// as they are, however, that would only postpone the problem as
// assemblers can't encode instructions that use virtual registers.
if (cf == InstControlFlow::kJump) {
// Unconditional jump makes the code after the jump unreachable, which will be removed instantly during
// the CFG construction; as we cannot allocate registers for instructions that are not part of any block.
// Of course we can leave these instructions as they are, however, that would only postpone the problem
// as assemblers can't encode instructions that use virtual registers.
_curBlock = nullptr;
}
else {
@@ -277,7 +254,7 @@ public:
return DebugUtils::errored(kErrorInvalidState);
RABlock* consecutiveBlock;
if (node->type() == BaseNode::kNodeLabel) {
if (node->type() == NodeType::kLabel) {
if (node->hasPassData()) {
consecutiveBlock = node->passData<RABlock>();
}
@@ -294,7 +271,7 @@ public:
return DebugUtils::errored(kErrorOutOfMemory);
}
_curBlock->addFlags(RABlock::kFlagHasConsecutive);
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
ASMJIT_PROPAGATE(_curBlock->prependSuccessor(consecutiveBlock));
_curBlock = consecutiveBlock;
@@ -310,7 +287,7 @@ public:
}
}
if (controlType == BaseInst::kControlReturn) {
if (cf == InstControlFlow::kReturn) {
_curBlock->setLast(node);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(_retBlock));
@@ -319,19 +296,18 @@ public:
}
}
}
else if (node->type() == BaseNode::kNodeLabel) {
else if (node->type() == NodeType::kLabel) {
// Label - Basic-Block Management
// ------------------------------
if (!_curBlock) {
// If the current code is unreachable the label makes it reachable
// again. We may remove the whole block in the future if it's not
// referenced though.
// If the current code is unreachable the label makes it reachable again. We may remove the whole block in
// the future if it's not referenced though.
_curBlock = node->passData<RABlock>();
if (_curBlock) {
// If the label has a block assigned we can either continue with
// it or skip it if the block has been constructed already.
// If the label has a block assigned we can either continue with it or skip it if the block has been
// constructed already.
if (_curBlock->isConstructed())
break;
}
@@ -354,20 +330,18 @@ public:
consecutive->makeTargetable();
if (_curBlock == consecutive) {
// The label currently processed is part of the current block. This
// is only possible for multiple labels that are right next to each
// other or labels that are separated by non-code nodes like directives
// and comments.
// The label currently processed is part of the current block. This is only possible for multiple labels
// that are right next to each other or labels that are separated by non-code nodes like directives and
// comments.
if (ASMJIT_UNLIKELY(_hasCode))
return DebugUtils::errored(kErrorInvalidState);
}
else {
// Label makes the current block constructed. There is a chance that the
// Label is not used, but we don't know that at this point. In the worst
// case there would be two blocks next to each other, it's just fine.
// Label makes the current block constructed. There is a chance that the Label is not used, but we don't
// know that at this point. In the worst case there would be two blocks next to each other, it's just fine.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlock::kFlagHasConsecutive);
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
ASMJIT_PROPAGATE(_curBlock->appendSuccessor(consecutive));
@@ -381,12 +355,11 @@ public:
else {
// First time we see this label.
if (_hasCode || _curBlock == entryBlock) {
// Cannot continue the current block if it already contains some
// code or it's a block entry. We need to create a new block and
// make it a successor.
// Cannot continue the current block if it already contains some code or it's a block entry. We need to
// create a new block and make it a successor.
ASMJIT_ASSERT(_curBlock->last() != node);
_curBlock->setLast(node->prev());
_curBlock->addFlags(RABlock::kFlagHasConsecutive);
_curBlock->addFlags(RABlockFlags::kHasConsecutive);
_curBlock->makeConstructed(_blockRegStats);
RABlock* consecutive = _pass->newBlock(node);
@@ -425,7 +398,7 @@ public:
logNode(node, kCodeIndentation);
if (node->type() == BaseNode::kNodeSentinel) {
if (node->type() == NodeType::kSentinel) {
if (node == _funcNode->endNode()) {
// Make sure we didn't flow here if this is the end of the function sentinel.
if (ASMJIT_UNLIKELY(_curBlock))
@@ -433,7 +406,7 @@ public:
break;
}
}
else if (node->type() == BaseNode::kNodeFunc) {
else if (node->type() == NodeType::kFunc) {
// RAPass can only compile a single function at a time. If we
// encountered a function it must be the current one, bail if not.
if (ASMJIT_UNLIKELY(node != _funcNode))
@@ -448,10 +421,9 @@ public:
// Advance to the next node.
node = next;
// NOTE: We cannot encounter a NULL node, because every function must be
// terminated by a sentinel (`stop`) node. If we encountered a NULL node it
// means that something went wrong and this node list is corrupted; bail in
// such case.
// NOTE: We cannot encounter a NULL node, because every function must be terminated by a sentinel (`stop`)
// node. If we encountered a NULL node it means that something went wrong and this node list is corrupted;
// bail in such case.
if (ASMJIT_UNLIKELY(!node))
return DebugUtils::errored(kErrorInvalidState);
}
@@ -465,9 +437,10 @@ public:
return _pass->initSharedAssignments(_sharedAssignmentsMap);
}
// --------------------------------------------------------------------------
// [Prepare]
// --------------------------------------------------------------------------
//! \}
//! \name Prepare
//! \{
//! Prepares the CFG builder of the current function.
Error prepare() noexcept {
@@ -504,9 +477,10 @@ public:
return _pass->addBlock(_curBlock);
}
// --------------------------------------------------------------------------
// [Utilities]
// --------------------------------------------------------------------------
//! \}
//! \name Utilities
//! \{
//! Called when a `node` is removed, e.g. because of a dead code elimination.
void removeNode(BaseNode* node) noexcept {
@@ -516,9 +490,8 @@ public:
//! Handles block with unknown jump, which could be a jump to a jump table.
//!
//! If we encounter such block we basically insert all existing blocks as
//! successors except the function entry block and a natural successor, if
//! such block exists.
//! If we encounter such block we basically insert all existing blocks as successors except the function entry
//! block and a natural successor, if such block exists.
Error handleBlockWithUnknownJump(RABlock* block) noexcept {
RABlocks& blocks = _pass->blocks();
size_t blockCount = blocks.size();
@@ -570,9 +543,10 @@ public:
return kErrorOk;
}
// --------------------------------------------------------------------------
// [Logging]
// --------------------------------------------------------------------------
//! \}
//! \name Logging
//! \{
#ifndef ASMJIT_NO_LOGGING
template<typename... Args>
@@ -606,7 +580,7 @@ public:
_sb.append(action);
_sb.append(' ');
}
Formatter::formatNode(_sb, _logFlags, cc(), node);
Formatter::formatNode(_sb, _formatOptions, cc(), node);
_sb.append('\n');
_logger->log(_sb);
}
@@ -625,6 +599,8 @@ public:
DebugUtils::unused(node, indentation, action);
}
#endif
//! \}
};
//! \}
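RACFGBuilderT is a CRTP base: run() dispatches into the derived class through static_cast<This*>(this). A sketch of the hooks an architecture-specific builder is expected to provide (the derived class name is illustrative, and `ib` is assumed to be the RAInstBuilder that run() passes to the hooks):
// Illustrative CRTP front-end (not an actual AsmJit backend); hook signatures
// mirror the calls made from RACFGBuilderT<This>::run() above.
class MyCFGBuilder : public RACFGBuilderT<MyCFGBuilder> {
public:
  inline MyCFGBuilder(BaseRAPass* pass) noexcept : RACFGBuilderT<MyCFGBuilder>(pass) {}

  Error onInst(InstNode* inst, InstControlFlow& cf, RAInstBuilder& ib) noexcept {
    DebugUtils::unused(inst, ib);
    cf = InstControlFlow::kRegular;   // a real backend classifies jumps/branches/returns here
    return kErrorOk;
  }

  Error onBeforeInvoke(InvokeNode* invokeNode) noexcept { DebugUtils::unused(invokeNode); return kErrorOk; }
  Error onInvoke(InvokeNode* invokeNode, RAInstBuilder& ib) noexcept { DebugUtils::unused(invokeNode, ib); return kErrorOk; }
  Error onBeforeRet(FuncRetNode* funcRet) noexcept { DebugUtils::unused(funcRet); return kErrorOk; }
  Error onRet(FuncRetNode* funcRet, RAInstBuilder& ib) noexcept { DebugUtils::unused(funcRet, ib); return kErrorOk; }
};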

File diff suppressed because it is too large

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
@@ -29,20 +11,18 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::RALocalAllocator - Utilities]
// ============================================================================
// RALocalAllocator - Utilities
// ============================
static ASMJIT_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept {
static ASMJIT_FORCE_INLINE RATiedReg* RALocal_findTiedRegByWorkId(RATiedReg* tiedRegs, size_t count, uint32_t workId) noexcept {
for (size_t i = 0; i < count; i++)
if (tiedRegs[i].workId() == workId)
return &tiedRegs[i];
return nullptr;
}
// ============================================================================
// [asmjit::RALocalAllocator - Init / Reset]
// ============================================================================
// RALocalAllocator - Init & Reset
// ===============================
Error RALocalAllocator::init() noexcept {
PhysToWorkMap* physToWorkMap;
@@ -67,9 +47,8 @@ Error RALocalAllocator::init() noexcept {
return kErrorOk;
}
// ============================================================================
// [asmjit::RALocalAllocator - Assignment]
// ============================================================================
// RALocalAllocator - Assignment
// =============================
Error RALocalAllocator::makeInitialAssignment() noexcept {
FuncNode* func = _pass->func();
@@ -83,10 +62,12 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
for (uint32_t argIndex = 0; argIndex < argCount; argIndex++) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
// Unassigned argument.
VirtReg* virtReg = func->argPack(argIndex)[valueIndex];
if (!virtReg)
const RegOnly& regArg = func->argPack(argIndex)[valueIndex];
if (!regArg.isReg() || !_cc->isVirtIdValid(regArg.id()))
continue;
VirtReg* virtReg = _cc->virtRegById(regArg.id());
// Unreferenced argument.
RAWorkReg* workReg = virtReg->workReg();
if (!workReg)
@@ -97,18 +78,18 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
if (!liveIn.bitAt(workId))
continue;
uint32_t group = workReg->group();
RegGroup group = workReg->group();
if (_curAssignment.workToPhysId(group, workId) != RAAssignment::kPhysNone)
continue;
uint32_t allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
RegMask allocableRegs = _availableRegs[group] & ~_curAssignment.assigned(group);
if (iter == 0) {
// First iteration: Try to allocate to home RegId.
if (workReg->hasHomeRegId()) {
uint32_t physId = workReg->homeRegId();
if (Support::bitTest(allocableRegs, physId)) {
_curAssignment.assign(group, workId, physId, true);
_pass->_argsAssignment.assignRegInPack(argIndex, valueIndex, workReg->info().type(), physId, workReg->typeId());
_pass->_argsAssignment.assignRegInPack(argIndex, valueIndex, workReg->type(), physId, workReg->typeId());
continue;
}
}
@@ -120,7 +101,7 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
if (allocableRegs) {
uint32_t physId = Support::ctz(allocableRegs);
_curAssignment.assign(group, workId, physId, true);
_pass->_argsAssignment.assignRegInPack(argIndex, valueIndex, workReg->info().type(), physId, workReg->typeId());
_pass->_argsAssignment.assignRegInPack(argIndex, valueIndex, workReg->type(), physId, workReg->typeId());
}
else {
// This register will definitely need stack, create the slot now and assign also `argIndex`
@@ -130,7 +111,7 @@ Error RALocalAllocator::makeInitialAssignment() noexcept {
return DebugUtils::errored(kErrorOutOfMemory);
// This means STACK_ARG may be moved to STACK.
workReg->addFlags(RAWorkReg::kFlagStackArgToStack);
workReg->addFlags(RAWorkRegFlags::kStackArgToStack);
_pass->_numStackArgsToStackSlots++;
}
}
@@ -165,15 +146,15 @@ Error RALocalAllocator::switchToAssignment(
if (tryMode)
return kErrorOk;
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
// ------------------------------------------------------------------------
// STEP 1:
for (RegGroup group : RegGroupVirtValues{}) {
// STEP 1
// ------
//
// - KILL all registers that are not live at `dst`,
// - SPILL all registers that are not assigned at `dst`.
// ------------------------------------------------------------------------
if (!tryMode) {
Support::BitWordIterator<uint32_t> it(cur.assigned(group));
Support::BitWordIterator<RegMask> it(cur.assigned(group));
while (it.hasNext()) {
uint32_t physId = it.next();
uint32_t workId = cur.physToWorkId(group, physId);
@@ -195,19 +176,18 @@ Error RALocalAllocator::switchToAssignment(
}
}
// ------------------------------------------------------------------------
// STEP 2:
// - MOVE and SWAP registers from their current assignments into their
// DST assignments.
// STEP 2
// ------
//
// - MOVE and SWAP registers from their current assignments into their DST assignments.
// - Build `willLoadRegs` mask of registers scheduled for `onLoadReg()`.
// ------------------------------------------------------------------------
// Current run-id (1 means more aggressive decisions).
int32_t runId = -1;
// Remaining registers scheduled for `onLoadReg()`.
uint32_t willLoadRegs = 0;
RegMask willLoadRegs = 0;
// Remaining registers to be allocated in this loop.
uint32_t affectedRegs = dst.assigned(group);
RegMask affectedRegs = dst.assigned(group);
while (affectedRegs) {
if (++runId == 2) {
@@ -218,10 +198,10 @@ Error RALocalAllocator::switchToAssignment(
break;
}
Support::BitWordIterator<uint32_t> it(affectedRegs);
Support::BitWordIterator<RegMask> it(affectedRegs);
while (it.hasNext()) {
uint32_t physId = it.next();
uint32_t physMask = Support::bitMask(physId);
RegMask physMask = Support::bitMask<RegMask>(physId);
uint32_t curWorkId = cur.physToWorkId(group, physId);
uint32_t dstWorkId = dst.physToWorkId(group, physId);
@@ -243,7 +223,7 @@ Error RALocalAllocator::switchToAssignment(
// Reset as we will do some changes to the current assignment.
runId = -1;
if (_archTraits->hasSwap(group)) {
if (_archTraits->hasInstRegSwap(group)) {
ASMJIT_PROPAGATE(onSwapReg(group, curWorkId, physId, dstWorkId, altPhysId));
}
else {
@@ -252,7 +232,7 @@ Error RALocalAllocator::switchToAssignment(
ASMJIT_PROPAGATE(onKillReg(group, curWorkId, physId));
}
else {
uint32_t allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group);
RegMask allocableRegs = _pass->_availableRegs[group] & ~cur.assigned(group);
// If possible don't conflict with assigned regs at DST.
if (allocableRegs & ~dst.assigned(group))
@@ -294,9 +274,8 @@ Cleared:
// CUR dirty, DST not dirty (the assert is just to visualize the condition).
ASMJIT_ASSERT(!dst.isPhysDirty(group, physId) && cur.isPhysDirty(group, physId));
// If `dstReadOnly` is true it means that that block was already
// processed and we cannot change from CLEAN to DIRTY. In that case
// the register has to be saved as it cannot enter the block DIRTY.
// If `dstReadOnly` is true it means that that block was already processed and we cannot change from
// CLEAN to DIRTY. In that case the register has to be saved as it cannot enter the block DIRTY.
if (dstReadOnly)
ASMJIT_PROPAGATE(onSaveReg(group, dstWorkId, physId));
else
@@ -319,13 +298,13 @@ Cleared:
}
}
// ------------------------------------------------------------------------
// STEP 3:
// STEP 3
// ------
//
// - Load registers specified by `willLoadRegs`.
// ------------------------------------------------------------------------
{
Support::BitWordIterator<uint32_t> it(willLoadRegs);
Support::BitWordIterator<RegMask> it(willLoadRegs);
while (it.hasNext()) {
uint32_t physId = it.next();
@@ -349,7 +328,7 @@ Cleared:
}
if (!tryMode) {
// Hre is a code that dumps the conflicting part if something fails here:
// Here is a code that dumps the conflicting part if something fails here:
// if (!dst.equals(cur)) {
// uint32_t physTotal = dst._layout.physTotal;
// uint32_t workCount = dst._layout.workCount;
@@ -374,9 +353,9 @@ Cleared:
return kErrorOk;
}
Error RALocalAllocator::spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noexcept {
uint32_t group = BaseReg::kGroupGp;
Support::BitWordIterator<uint32_t> it(scratchRegs);
Error RALocalAllocator::spillScratchGpRegsBeforeEntry(RegMask scratchRegs) noexcept {
RegGroup group = RegGroup::kGp;
Support::BitWordIterator<RegMask> it(scratchRegs);
while (it.hasNext()) {
uint32_t physId = it.next();
@@ -389,15 +368,15 @@ Error RALocalAllocator::spillScratchGpRegsBeforeEntry(uint32_t scratchRegs) noex
return kErrorOk;
}
// ============================================================================
// [asmjit::RALocalAllocator - Allocation]
// ============================================================================
// RALocalAllocator - Allocation
// =============================
Error RALocalAllocator::allocInst(InstNode* node) noexcept {
RAInst* raInst = node->passData<RAInst>();
RATiedReg* outTiedRegs[Globals::kMaxPhysRegs];
RATiedReg* dupTiedRegs[Globals::kMaxPhysRegs];
RATiedReg* consecutiveRegs[kMaxConsecutiveRegs];
// The cursor must point to the previous instruction for a possible instruction insertion.
_cc->_setCursor(node->prev());
@@ -410,34 +389,43 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
// Whether we already replaced register operand with memory operand.
bool rmAllocated = false;
for (uint32_t group = 0; group < BaseReg::kGroupVirt; group++) {
for (RegGroup group : RegGroupVirtValues{}) {
uint32_t i, count = this->tiedCount(group);
RATiedReg* tiedRegs = this->tiedRegs(group);
uint32_t willUse = _raInst->_usedRegs[group];
uint32_t willOut = _raInst->_clobberedRegs[group];
uint32_t willFree = 0;
uint32_t usePending = count;
RegMask willUse = _raInst->_usedRegs[group];
RegMask willOut = _raInst->_clobberedRegs[group];
RegMask willFree = 0;
uint32_t usePending = count;
uint32_t outTiedCount = 0;
uint32_t dupTiedCount = 0;
uint32_t consecutiveMask = 0;
// ------------------------------------------------------------------------
// STEP 1:
// STEP 1
// ------
//
// Calculate `willUse` and `willFree` masks based on tied registers we have.
// Calculate `willUse` and `willFree` masks based on tied registers we have. In addition, aggregate information
// regarding consecutive registers used by this instruction. We need that to make USE/OUT assignments.
//
// We don't do any assignment decisions at this stage as we just need to
// collect some information first. Then, after we populate all masks needed
// we can finally make some decisions in the second loop. The main reason
// for this is that we really need `willFree` to make assignment decisions
// for `willUse`, because if we mark some registers that will be freed, we
// can consider them in decision making afterwards.
// ------------------------------------------------------------------------
// We don't do any assignment decisions at this stage as we just need to collect some information first. Then,
// after we populate all masks needed we can finally make some decisions in the second loop. The main reason
// for this is that we really need `willFree` to make assignment decisions for `willUse`, because if we mark
// some registers that will be freed, we can consider them in decision making afterwards.
for (i = 0; i < count; i++) {
RATiedReg* tiedReg = &tiedRegs[i];
if (tiedReg->hasAnyConsecutiveFlag()) {
uint32_t consecutiveOffset = tiedReg->isLeadConsecutive() ? uint32_t(0) : tiedReg->consecutiveData();
if (ASMJIT_UNLIKELY(Support::bitTest(consecutiveMask, consecutiveOffset)))
return DebugUtils::errored(kErrorInvalidState);
consecutiveMask |= Support::bitMask(consecutiveOffset);
consecutiveRegs[consecutiveOffset] = tiedReg;
}
// Add OUT and KILL to `outPending` for CLOBBERing and/or OUT assignment.
if (tiedReg->isOutOrKill())
outTiedRegs[outTiedCount++] = tiedReg;
@@ -451,12 +439,16 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
continue;
}
// Don't assign anything here if this is a consecutive USE - we will handle this in STEP 2 instead.
if (tiedReg->isUseConsecutive())
continue;
uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
if (tiedReg->hasUseId()) {
// If the register has `useId` it means it can only be allocated in that register.
uint32_t useMask = Support::bitMask(tiedReg->useId());
RegMask useMask = Support::bitMask(tiedReg->useId());
// RAInstBuilder must have collected `usedRegs` on-the-fly.
ASMJIT_ASSERT((willUse & useMask) != 0);
@@ -475,9 +467,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
else {
// Check if the register must be moved to `allocableRegs`.
uint32_t allocableRegs = tiedReg->allocableRegs();
RegMask allocableRegs = tiedReg->useRegMask();
if (assignedId != RAAssignment::kPhysNone) {
uint32_t assignedMask = Support::bitMask(assignedId);
RegMask assignedMask = Support::bitMask(assignedId);
if ((allocableRegs & ~willUse) & assignedMask) {
tiedReg->setUseId(assignedId);
tiedReg->markUseDone();
@@ -493,24 +485,107 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
}
// ------------------------------------------------------------------------
// STEP 2:
// STEP 2
// ------
//
// Do some decision making to find the best candidates of registers that
// need to be assigned, moved, and/or spilled. Only USE registers are
// considered here, OUT will be decided later after all CLOBBERed and OUT
// Verify that all the consecutive registers are really consecutive. Terminate if there is a gap. In addition,
// decide which USE ids will be used in case that this consecutive sequence is USE (OUT registers are allocated
// in a different step).
uint32_t consecutiveCount = 0;
if (consecutiveMask) {
if ((consecutiveMask & (consecutiveMask + 1u)) != 0)
return DebugUtils::errored(kErrorInvalidState);
// Count of trailing ones is the count of consecutive registers. There cannot be gap.
consecutiveCount = Support::ctz(~consecutiveMask);
// Prioritize allocation that would result in least moves even when moving registers away from their homes.
RATiedReg* lead = consecutiveRegs[0];
// Assign the best possible USE Ids to all consecutives.
if (lead->isUseConsecutive()) {
uint32_t bestScore = 0;
uint32_t bestLeadReg = 0xFFFFFFFF;
RegMask allocableRegs = (_availableRegs[group] | willFree) & ~willUse;
uint32_t assignments[kMaxConsecutiveRegs];
for (i = 0; i < consecutiveCount; i++)
assignments[i] = _curAssignment.workToPhysId(group, consecutiveRegs[i]->workId());
Support::BitWordIterator<uint32_t> it(lead->useRegMask());
while (it.hasNext()) {
uint32_t regIndex = it.next();
if (Support::bitTest(lead->useRegMask(), regIndex)) {
uint32_t score = 15;
for (i = 0; i < consecutiveCount; i++) {
uint32_t consecutiveIndex = regIndex + i;
if (!Support::bitTest(allocableRegs, consecutiveIndex)) {
score = 0;
break;
}
RAWorkReg* workReg = workRegById(consecutiveRegs[i]->workId());
score += uint32_t(workReg->homeRegId() == consecutiveIndex);
score += uint32_t(assignments[i] == consecutiveIndex) * 2;
}
if (score > bestScore) {
bestScore = score;
bestLeadReg = regIndex;
}
}
}
if (bestLeadReg == 0xFFFFFFFF)
return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
for (i = 0; i < consecutiveCount; i++) {
uint32_t consecutiveIndex = bestLeadReg + i;
RATiedReg* tiedReg = consecutiveRegs[i];
RegMask useMask = Support::bitMask(consecutiveIndex);
uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
tiedReg->setUseId(consecutiveIndex);
if (assignedId == consecutiveIndex) {
// If the register is already allocated in this one, mark it done and continue.
tiedReg->markUseDone();
if (tiedReg->isWrite())
_curAssignment.makeDirty(group, workId, assignedId);
usePending--;
willUse |= useMask;
}
else {
willUse |= useMask;
willFree |= useMask & _curAssignment.assigned(group);
}
}
}
}
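The (consecutiveMask & (consecutiveMask + 1u)) check above accepts exactly the masks that form an unbroken run of trailing ones, and Support::ctz(~consecutiveMask) then counts that run. Two illustrative values:
// Illustration of the contiguity test used above (values are examples only):
static_assert((0x7u & (0x7u + 1u)) == 0u, "0b0111: offsets 0..2 present, no gap -> 3 consecutive regs");
static_assert((0x5u & (0x5u + 1u)) != 0u, "0b0101: offset 1 missing -> rejected as invalid state");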
// STEP 3
// ------
//
// Do some decision making to find the best candidates of registers that need to be assigned, moved, and/or
// spilled. Only USE registers are considered here, OUT will be decided later after all CLOBBERed and OUT
// registers are unassigned.
// ------------------------------------------------------------------------
if (usePending) {
// TODO: Not sure `liveRegs` should be used, maybe willUse and willFree would be enough and much more clear.
// All registers that are currently alive without registers that will be freed.
uint32_t liveRegs = _curAssignment.assigned(group) & ~willFree;
RegMask liveRegs = _curAssignment.assigned(group) & ~willFree;
for (i = 0; i < count; i++) {
RATiedReg* tiedReg = &tiedRegs[i];
if (tiedReg->isUseDone()) continue;
if (tiedReg->isUseDone())
continue;
uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
@@ -538,18 +613,17 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
if (!tiedReg->hasUseId()) {
uint32_t allocableRegs = tiedReg->allocableRegs() & ~(willFree | willUse);
// DECIDE where to assign the USE register.
RegMask allocableRegs = tiedReg->useRegMask() & ~(willFree | willUse);
uint32_t useId = decideOnAssignment(group, workId, assignedId, allocableRegs);
uint32_t useMask = Support::bitMask(useId);
RegMask useMask = Support::bitMask(useId);
willUse |= useMask;
willFree |= useMask & liveRegs;
tiedReg->setUseId(useId);
if (assignedId != RAAssignment::kPhysNone) {
uint32_t assignedMask = Support::bitMask(assignedId);
RegMask assignedMask = Support::bitMask(assignedId);
willFree |= assignedMask;
liveRegs &= ~assignedMask;
@@ -579,19 +653,18 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
}
// Initially all used regs will be marked clobbered.
uint32_t clobberedByInst = willUse | willOut;
// Initially all used regs will be marked as clobbered.
RegMask clobberedByInst = willUse | willOut;
// ------------------------------------------------------------------------
// STEP 3:
// STEP 4
// ------
//
// Free all registers that we marked as `willFree`. Only registers that are not
// USEd by the instruction are considered as we don't want to free regs we need.
// ------------------------------------------------------------------------
// Free all registers that we marked as `willFree`. Only registers that are not USEd by the instruction are
// considered as we don't want to free regs we need.
if (willFree) {
uint32_t allocableRegs = _availableRegs[group] & ~(_curAssignment.assigned(group) | willFree | willUse | willOut);
Support::BitWordIterator<uint32_t> it(willFree);
RegMask allocableRegs = _availableRegs[group] & ~(_curAssignment.assigned(group) | willFree | willUse | willOut);
Support::BitWordIterator<RegMask> it(willFree);
do {
uint32_t assignedId = it.next();
@@ -613,21 +686,17 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
} while (it.hasNext());
}
// ------------------------------------------------------------------------
// STEP 4:
// STEP 5
// ------
//
// ALLOCATE / SHUFFLE all registers that we marked as `willUse` and weren't
// allocated yet. This is a bit complicated as the allocation is iterative.
// In some cases we have to wait before allocating a particual physical
// register as it's still occupied by some other one, which we need to move
// before we can use it. In this case we skip it and allocate another some
// other instead (making it free for another iteration).
// ALLOCATE / SHUFFLE all registers that we marked as `willUse` and weren't allocated yet. This is a bit
// complicated as the allocation is iterative. In some cases we have to wait before allocating a particular
// physical register as it's still occupied by some other one, which we need to move before we can use it.
// In this case we skip it and allocate some other register instead (making it free for another iteration).
//
// NOTE: Iterations are mostly important for complicated allocations like
// function calls, where there can be up to N registers used at once. Asm
// instructions won't run the loop more than once in 99.9% of cases as they
// use 2..3 registers in average.
// ------------------------------------------------------------------------
// NOTE: Iterations are mostly important for complicated allocations like function calls, where there can
// be up to N registers used at once. Asm instructions won't run the loop more than once in 99.9% of cases
// as they use 2..3 registers in average.
if (usePending) {
bool mustSwap = false;
@@ -636,7 +705,8 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
for (i = 0; i < count; i++) {
RATiedReg* thisTiedReg = &tiedRegs[i];
if (thisTiedReg->isUseDone()) continue;
if (thisTiedReg->isUseDone())
continue;
uint32_t thisWorkId = thisTiedReg->workId();
uint32_t thisPhysId = _curAssignment.workToPhysId(group, thisWorkId);
@@ -649,11 +719,10 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (targetWorkId != RAAssignment::kWorkNone) {
RAWorkReg* targetWorkReg = workRegById(targetWorkId);
// Swapping two registers can solve two allocation tasks by emitting
// just a single instruction. However, swap is only available on few
// architectures and it's definitely not available for each register
// group. Calling `onSwapReg()` before checking these would be fatal.
if (_archTraits->hasSwap(group) && thisPhysId != RAAssignment::kPhysNone) {
// Swapping two registers can solve two allocation tasks by emitting just a single instruction. However,
// swap is only available on few architectures and it's definitely not available for each register group.
// Calling `onSwapReg()` before checking these would be fatal.
if (_archTraits->hasInstRegSwap(group) && thisPhysId != RAAssignment::kPhysNone) {
ASMJIT_PROPAGATE(onSwapReg(group, thisWorkId, thisPhysId, targetWorkId, targetPhysId));
thisTiedReg->markUseDone();
@@ -675,10 +744,9 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
if (!mustSwap)
continue;
// Only branched here if the previous iteration did nothing. This is
// essentially a SWAP operation without having a dedicated instruction
// for that purpose (vector registers, etc). The simplest way to
// handle such case is to SPILL the target register.
// Only branched here if the previous iteration did nothing. This is essentially a SWAP operation without
// having a dedicated instruction for that purpose (vector registers, etc). The simplest way to handle
// such case is to SPILL the target register.
ASMJIT_PROPAGATE(onSpillReg(group, targetWorkId, targetPhysId));
}
@@ -704,11 +772,10 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
} while (usePending);
}
// ------------------------------------------------------------------------
// STEP 5:
// STEP 6
// ------
//
// KILL registers marked as KILL/OUT.
// ------------------------------------------------------------------------
uint32_t outPending = outTiedCount;
if (outTiedCount) {
@@ -718,28 +785,27 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
uint32_t workId = tiedReg->workId();
uint32_t physId = _curAssignment.workToPhysId(group, workId);
// Must check if it's allocated as KILL can be related to OUT (like KILL
// immediately after OUT, which could mean the register is not assigned).
// Must check if it's allocated as KILL can be related to OUT (like KILL immediately after OUT, which could
// mean the register is not assigned).
if (physId != RAAssignment::kPhysNone) {
ASMJIT_PROPAGATE(onKillReg(group, workId, physId));
willOut &= ~Support::bitMask(physId);
}
// We still maintain number of pending registers for OUT assignment.
// So, if this is only KILL, not OUT, we can safely decrement it.
// We still maintain number of pending registers for OUT assignment. So, if this is only KILL, not OUT, we
// can safely decrement it.
outPending -= !tiedReg->isOut();
}
}
// ------------------------------------------------------------------------
// STEP 6:
// STEP 7
// ------
//
// SPILL registers that will be CLOBBERed. Since OUT and KILL were
// already processed this is used mostly to handle function CALLs.
// ------------------------------------------------------------------------
// SPILL registers that will be CLOBBERed. Since OUT and KILL were already processed this is used mostly to
// handle function CALLs.
if (willOut) {
Support::BitWordIterator<uint32_t> it(willOut);
Support::BitWordIterator<RegMask> it(willOut);
do {
uint32_t physId = it.next();
uint32_t workId = _curAssignment.physToWorkId(group, physId);
@@ -751,18 +817,17 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
} while (it.hasNext());
}
// ------------------------------------------------------------------------
// STEP 7:
// STEP 8
// ------
//
// Duplication.
// ------------------------------------------------------------------------
for (i = 0; i < dupTiedCount; i++) {
RATiedReg* tiedReg = dupTiedRegs[i];
uint32_t workId = tiedReg->workId();
uint32_t srcId = tiedReg->useId();
Support::BitWordIterator<uint32_t> it(tiedReg->_allocableRegs);
Support::BitWordIterator<RegMask> it(tiedReg->useRegMask());
while (it.hasNext()) {
uint32_t dstId = it.next();
if (dstId == srcId)
@@ -771,27 +836,71 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
}
// ------------------------------------------------------------------------
// STEP 8:
// STEP 9
// ------
//
// Assign OUT registers.
// ------------------------------------------------------------------------
if (outPending) {
// Live registers, we need a separate variable (outside of `_curAssignment)
// to hold these because of KILLed registers. If we KILL a register here it
// will go out from `_curAssignment`, but we cannot assign to it in here.
uint32_t liveRegs = _curAssignment.assigned(group);
// Live registers, we need a separate mask (outside of `_curAssignment`) to hold these because of KILLed
// registers. If we KILL a register here it will go out from `_curAssignment`, but we cannot assign to it in
// here.
RegMask liveRegs = _curAssignment.assigned(group);
// Must avoid as they have been already OUTed (added during the loop).
uint32_t outRegs = 0;
RegMask outRegs = 0;
// Must avoid as they collide with already allocated ones.
uint32_t avoidRegs = willUse & ~clobberedByInst;
RegMask avoidRegs = willUse & ~clobberedByInst;
// Assign the best possible OUT ids of all consecutives.
if (consecutiveCount) {
RATiedReg* lead = consecutiveRegs[0];
if (lead->isOutConsecutive()) {
uint32_t bestScore = 0;
uint32_t bestLeadReg = 0xFFFFFFFF;
RegMask allocableRegs = _availableRegs[group] & ~(outRegs | avoidRegs);
Support::BitWordIterator<uint32_t> it(lead->outRegMask());
while (it.hasNext()) {
uint32_t regIndex = it.next();
if (Support::bitTest(lead->outRegMask(), regIndex)) {
uint32_t score = 15;
for (i = 0; i < consecutiveCount; i++) {
uint32_t consecutiveIndex = regIndex + i;
if (!Support::bitTest(allocableRegs, consecutiveIndex)) {
score = 0;
break;
}
RAWorkReg* workReg = workRegById(consecutiveRegs[i]->workId());
score += uint32_t(workReg->homeRegId() == consecutiveIndex);
}
if (score > bestScore) {
bestScore = score;
bestLeadReg = regIndex;
}
}
}
if (bestLeadReg == 0xFFFFFFFF)
return DebugUtils::errored(kErrorConsecutiveRegsAllocation);
for (i = 0; i < consecutiveCount; i++) {
uint32_t consecutiveIndex = bestLeadReg + i;
RATiedReg* tiedReg = consecutiveRegs[i];
tiedReg->setOutId(consecutiveIndex);
}
}
}
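Both scoring loops (the USE one in STEP 2 above and the OUT one here) give every viable lead candidate a base score of 15 plus small bonuses, so the placement requiring the fewest moves wins. A hypothetical two-register example:
// Hypothetical scoring for two consecutive OUT registers and two candidate leads:
//   lead = r4: r4 and r5 both allocable, r4 is reg[0]'s home id -> 15 + 1 + 0 = 16
//   lead = r8: r8 and r9 both allocable, no home-id matches     -> 15 + 0 + 0 = 15
// r4 wins; in the USE variant an already-assigned register adds another +2.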
// Allocate OUT registers.
for (i = 0; i < outTiedCount; i++) {
RATiedReg* tiedReg = outTiedRegs[i];
if (!tiedReg->isOut()) continue;
if (!tiedReg->isOut())
continue;
uint32_t workId = tiedReg->workId();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
@@ -801,7 +910,7 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
uint32_t physId = tiedReg->outId();
if (physId == RAAssignment::kPhysNone) {
uint32_t allocableRegs = tiedReg->_allocableRegs & ~(outRegs | avoidRegs);
RegMask allocableRegs = tiedReg->outRegMask() & ~(outRegs | avoidRegs);
if (!(allocableRegs & ~liveRegs)) {
// There are no more registers, decide which one to spill.
@@ -839,9 +948,8 @@ Error RALocalAllocator::allocInst(InstNode* node) noexcept {
}
Error RALocalAllocator::spillAfterAllocation(InstNode* node) noexcept {
// This is experimental feature that would spill registers that don't have
// home-id and are last in this basic block. This prevents saving these regs
// in other basic blocks and then restoring them (mostly relevant for loops).
// This is an experimental feature that would spill registers that don't have a home-id and are last used in this
// basic block. This prevents saving these regs in other basic blocks and then restoring them (mostly relevant for loops).
RAInst* raInst = node->passData<RAInst>();
uint32_t count = raInst->tiedCount();
@@ -851,7 +959,7 @@ Error RALocalAllocator::spillAfterAllocation(InstNode* node) noexcept {
uint32_t workId = tiedReg->workId();
RAWorkReg* workReg = workRegById(workId);
if (!workReg->hasHomeRegId()) {
uint32_t group = workReg->group();
RegGroup group = workReg->group();
uint32_t assignedId = _curAssignment.workToPhysId(group, workId);
if (assignedId != RAAssignment::kPhysNone) {
_cc->_setCursor(node);
@@ -898,9 +1006,8 @@ Error RALocalAllocator::allocBranch(InstNode* node, RABlock* target, RABlock* co
BaseNode* curCursor = _cc->cursor();
if (curCursor != injectionPoint) {
// Additional instructions emitted to switch from the current state to
// the `target` state. This means that we have to move these instructions
// into an independent code block and patch the jump location.
// Additional instructions emitted to switch from the current state to the `target` state. This means
// that we have to move these instructions into an independent code block and patch the jump location.
Operand& targetOp = node->op(node->opCount() - 1);
if (ASMJIT_UNLIKELY(!targetOp.isLabel()))
return DebugUtils::errored(kErrorInvalidState);
@@ -911,9 +1018,9 @@ Error RALocalAllocator::allocBranch(InstNode* node, RABlock* target, RABlock* co
// Patch `target` to point to the `trampoline` we just created.
targetOp = trampoline;
// Clear a possible SHORT form as we have no clue now if the SHORT form would
// be encodable after patching the target to `trampoline` (X86 specific).
node->clearInstOptions(BaseInst::kOptionShortForm);
// Clear a possible SHORT form as we have no clue now if the SHORT form would be encodable after patching
// the target to `trampoline` (X86 specific).
node->clearOptions(InstOptions::kShortForm);
// Finalize the switch assignment sequence.
ASMJIT_PROPAGATE(_pass->emitJump(savedTarget));
@@ -969,11 +1076,10 @@ Error RALocalAllocator::allocJumpTable(InstNode* node, const RABlocks& targets,
return kErrorOk;
}
// ============================================================================
// [asmjit::RALocalAllocator - Decision Making]
// ============================================================================
// RALocalAllocator - Decision Making
// ==================================
uint32_t RALocalAllocator::decideOnAssignment(uint32_t group, uint32_t workId, uint32_t physId, uint32_t allocableRegs) const noexcept {
uint32_t RALocalAllocator::decideOnAssignment(RegGroup group, uint32_t workId, uint32_t physId, RegMask allocableRegs) const noexcept {
ASMJIT_ASSERT(allocableRegs != 0);
DebugUtils::unused(group, physId);
@@ -987,14 +1093,14 @@ uint32_t RALocalAllocator::decideOnAssignment(uint32_t group, uint32_t workId, u
}
// Prefer registers used upon block entries.
uint32_t previouslyAssignedRegs = workReg->allocatedMask();
RegMask previouslyAssignedRegs = workReg->allocatedMask();
if (allocableRegs & previouslyAssignedRegs)
allocableRegs &= previouslyAssignedRegs;
return Support::ctz(allocableRegs);
}
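// Hypothetical walk-through of the code above (masks invented for illustration):
// with allocableRegs == 0b10110 and workReg->allocatedMask() == 0b00100 the two
// masks intersect, so allocableRegs narrows to 0b00100 and Support::ctz() returns
// physical id 2 - a register this WorkReg already occupied at some block entry.
// If the masks did not intersect, the lowest set bit of the original mask would
// be picked instead.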
uint32_t RALocalAllocator::decideOnReassignment(uint32_t group, uint32_t workId, uint32_t physId, uint32_t allocableRegs) const noexcept {
uint32_t RALocalAllocator::decideOnReassignment(RegGroup group, uint32_t workId, uint32_t physId, RegMask allocableRegs) const noexcept {
ASMJIT_ASSERT(allocableRegs != 0);
DebugUtils::unused(group, physId);
@@ -1012,12 +1118,12 @@ uint32_t RALocalAllocator::decideOnReassignment(uint32_t group, uint32_t workId,
return RAAssignment::kPhysNone;
}
uint32_t RALocalAllocator::decideOnSpillFor(uint32_t group, uint32_t workId, uint32_t spillableRegs, uint32_t* spillWorkId) const noexcept {
uint32_t RALocalAllocator::decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept {
// May be used in the future to decide which register would be best to spill so `workId` can be assigned.
DebugUtils::unused(workId);
ASMJIT_ASSERT(spillableRegs != 0);
Support::BitWordIterator<uint32_t> it(spillableRegs);
Support::BitWordIterator<RegMask> it(spillableRegs);
uint32_t bestPhysId = it.next();
uint32_t bestWorkId = _curAssignment.physToWorkId(group, bestPhysId);

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RALOCAL_P_H_INCLUDED
#define ASMJIT_CORE_RALOCAL_P_H_INCLUDED
@@ -38,10 +20,6 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RALocalAllocator]
// ============================================================================
//! Local register allocator.
class RALocalAllocator {
public:
@@ -119,14 +97,14 @@ public:
//! Returns all tied regs as `RATiedReg` array.
inline RATiedReg* tiedRegs() const noexcept { return _raInst->tiedRegs(); }
//! Returns tied registers grouped by the given `group`.
inline RATiedReg* tiedRegs(uint32_t group) const noexcept { return _raInst->tiedRegs(group); }
inline RATiedReg* tiedRegs(RegGroup group) const noexcept { return _raInst->tiedRegs(group); }
//! Returns count of all TiedRegs used by the instruction.
inline uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of TiedRegs used by the given register `group`.
inline uint32_t tiedCount(uint32_t group) const noexcept { return _tiedCount.get(group); }
inline uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount.get(group); }
inline bool isGroupUsed(uint32_t group) const noexcept { return _tiedCount[group] != 0; }
inline bool isGroupUsed(RegGroup group) const noexcept { return _tiedCount[group] != 0; }
//! \}
@@ -139,14 +117,12 @@ public:
const PhysToWorkMap* physToWorkMap,
const WorkToPhysMap* workToPhysMap) noexcept;
//! Switch to the given assignment by reassigning all register and emitting
//! code that reassigns them. This is always used to switch to a previously
//! stored assignment.
//! Switch to the given assignment by reassigning all registers and emitting code that reassigns them.
//! This is always used to switch to a previously stored assignment.
//!
//! If `tryMode` is true then the final assignment doesn't have to be exactly
//! same as specified by `dstPhysToWorkMap` and `dstWorkToPhysMap`. This mode
//! is only used before conditional jumps that already have assignment to
//! generate a code sequence that is always executed regardless of the flow.
//! If `tryMode` is true then the final assignment doesn't have to be exactly the same as specified by `dstPhysToWorkMap`
//! and `dstWorkToPhysMap`. This mode is only used before conditional jumps that already have an assignment, to generate
//! a code sequence that is always executed regardless of the flow.
Error switchToAssignment(
PhysToWorkMap* dstPhysToWorkMap,
WorkToPhysMap* dstWorkToPhysMap,
@@ -185,7 +161,7 @@ public:
return uint32_t(int32_t(freq * float(kCostOfFrequency)));
}
inline uint32_t calculateSpillCost(uint32_t group, uint32_t workId, uint32_t assignedId) const noexcept {
inline uint32_t calculateSpillCost(RegGroup group, uint32_t workId, uint32_t assignedId) const noexcept {
RAWorkReg* workReg = workRegById(workId);
uint32_t cost = costByFrequency(workReg->liveStats().freq());
@@ -196,18 +172,18 @@ public:
}
//! Decides on register assignment.
uint32_t decideOnAssignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
uint32_t decideOnAssignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept;
//! Decides on whether to MOVE or SPILL the given WorkReg, because it's allocated
//! in a physical register that have to be used by another WorkReg.
//! Decides whether to MOVE or SPILL the given WorkReg, because it's allocated in a physical register that has
//! to be used by another WorkReg.
//!
//! The function must return either `RAAssignment::kPhysNone`, which means that
//! the WorkReg of `workId` should be spilled, or a valid physical register ID,
//! which means that the register should be moved to that physical register instead.
uint32_t decideOnReassignment(uint32_t group, uint32_t workId, uint32_t assignedId, uint32_t allocableRegs) const noexcept;
//! The function must return either `RAAssignment::kPhysNone`, which means that the WorkReg of `workId` should be
//! spilled, or a valid physical register ID, which means that the register should be moved to that physical register
//! instead.
uint32_t decideOnReassignment(RegGroup group, uint32_t workId, uint32_t assignedId, RegMask allocableRegs) const noexcept;
//! Decides on the best spill given a register mask `spillableRegs`.
uint32_t decideOnSpillFor(uint32_t group, uint32_t workId, uint32_t spillableRegs, uint32_t* spillWorkId) const noexcept;
uint32_t decideOnSpillFor(RegGroup group, uint32_t workId, RegMask spillableRegs, uint32_t* spillWorkId) const noexcept;
//! \}
@@ -216,7 +192,7 @@ public:
//! Emits a move between a destination and source register, and fixes the
//! register assignment.
inline Error onMoveReg(uint32_t group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
inline Error onMoveReg(RegGroup group, uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept {
if (dstPhysId == srcPhysId) return kErrorOk;
_curAssignment.reassign(group, workId, dstPhysId, srcPhysId);
return _pass->emitMove(workId, dstPhysId, srcPhysId);
@@ -225,21 +201,21 @@ public:
//! Emits a swap between two physical registers and fixes their assignment.
//!
//! \note Target must support this operation otherwise this would ASSERT.
inline Error onSwapReg(uint32_t group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
inline Error onSwapReg(RegGroup group, uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept {
_curAssignment.swap(group, aWorkId, aPhysId, bWorkId, bPhysId);
return _pass->emitSwap(aWorkId, aPhysId, bWorkId, bPhysId);
}
//! Emits a load from [VirtReg/WorkReg]'s spill slot to a physical register
//! and makes it assigned and clean.
inline Error onLoadReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline Error onLoadReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.assign(group, workId, physId, RAAssignment::kClean);
return _pass->emitLoad(workId, physId);
}
//! Emits a save of a physical register to a [VirtReg/WorkReg]'s spill slot,
//! keeps it assigned, and makes it clean.
inline Error onSaveReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline Error onSaveReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
ASMJIT_ASSERT(_curAssignment.workToPhysId(group, workId) == physId);
ASMJIT_ASSERT(_curAssignment.physToWorkId(group, physId) == workId);
@@ -248,24 +224,24 @@ public:
}
//! Assigns a register, the content of it is undefined at this point.
inline Error onAssignReg(uint32_t group, uint32_t workId, uint32_t physId, uint32_t dirty) noexcept {
inline Error onAssignReg(RegGroup group, uint32_t workId, uint32_t physId, bool dirty) noexcept {
_curAssignment.assign(group, workId, physId, dirty);
return kErrorOk;
}
//! Spills a variable/register, saves the content to the memory-home if modified.
inline Error onSpillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline Error onSpillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
if (_curAssignment.isPhysDirty(group, physId))
ASMJIT_PROPAGATE(onSaveReg(group, workId, physId));
return onKillReg(group, workId, physId);
}
inline Error onDirtyReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline Error onDirtyReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.makeDirty(group, workId, physId);
return kErrorOk;
}
inline Error onKillReg(uint32_t group, uint32_t workId, uint32_t physId) noexcept {
inline Error onKillReg(RegGroup group, uint32_t workId, uint32_t physId) noexcept {
_curAssignment.unassign(group, workId, physId);
return kErrorOk;
}
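// Reading aid for the helpers above: when a physical register has to be freed,
// onSpillReg() first checks isPhysDirty(); a modified register is written back
// to its spill slot via onSaveReg() and then unmapped via onKillReg(), while a
// clean register skips the store and is only unassigned.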

File diff suppressed because it is too large

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RAPASS_P_H_INCLUDED
#define ASMJIT_CORE_RAPASS_P_H_INCLUDED
@@ -40,9 +22,34 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RABlock]
// ============================================================================
//! Flags used by \ref RABlock.
enum class RABlockFlags : uint32_t {
//! No flags.
kNone = 0,
//! Block has been constructed from nodes.
kIsConstructed = 0x00000001u,
//! Block is reachable (set by `buildCFGViews()`).
kIsReachable = 0x00000002u,
//! Block is a target (has an associated label or multiple labels).
kIsTargetable = 0x00000004u,
//! Block has been allocated.
kIsAllocated = 0x00000008u,
//! Block is a function-exit.
kIsFuncExit = 0x00000010u,
//! Block has a terminator (jump, conditional jump, ret).
kHasTerminator = 0x00000100u,
//! Block naturally flows to the next block.
kHasConsecutive = 0x00000200u,
//! Block has a jump to a jump-table at the end.
kHasJumpTable = 0x00000400u,
//! Block contains fixed registers (precolored).
kHasFixedRegs = 0x00000800u,
//! Block contains function calls.
kHasFuncCalls = 0x00001000u
};
ASMJIT_DEFINE_ENUM_FLAGS(RABlockFlags)
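Since RABlockFlags is now a strong-typed enum, a minimal usage sketch may help; it assumes only what this diff already shows, namely that ASMJIT_DEFINE_ENUM_FLAGS provides the bitwise operators and that flag tests go through Support::test():
RABlockFlags flags = RABlockFlags::kIsConstructed | RABlockFlags::kIsReachable;
flags |= RABlockFlags::kHasTerminator;
// Strong-typed flags no longer convert to uint32_t implicitly, so membership
// tests use Support::test() instead of `(flags & bit) != 0`.
if (Support::test(flags, RABlockFlags::kIsReachable)) {
  // ...
}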
//! Basic block used by register allocator pass.
class RABlock {
@@ -52,42 +59,34 @@ public:
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
enum Id : uint32_t {
//! \name Constants
//! \{
enum : uint32_t {
//! Unassigned block id.
kUnassignedId = 0xFFFFFFFFu
};
//! Basic block flags.
enum Flags : uint32_t {
//! Block has been constructed from nodes.
kFlagIsConstructed = 0x00000001u,
//! Block is reachable (set by `buildViews()`).
kFlagIsReachable = 0x00000002u,
//! Block is a target (has an associated label or multiple labels).
kFlagIsTargetable = 0x00000004u,
//! Block has been allocated.
kFlagIsAllocated = 0x00000008u,
//! Block is a function-exit.
kFlagIsFuncExit = 0x00000010u,
//! Block has a terminator (jump, conditional jump, ret).
kFlagHasTerminator = 0x00000100u,
//! Block naturally flows to the next block.
kFlagHasConsecutive = 0x00000200u,
//! Block has a jump to a jump-table at the end.
kFlagHasJumpTable = 0x00000400u,
//! Block contains fixed registers (precolored).
kFlagHasFixedRegs = 0x00000800u,
//! Block contains function calls.
kFlagHasFuncCalls = 0x00001000u
enum LiveType : uint32_t {
kLiveIn = 0,
kLiveOut = 1,
kLiveGen = 2,
kLiveKill = 3,
kLiveCount = 4
};
//! \}
//! \name Members
//! \{
//! Register allocator pass.
BaseRAPass* _ra;
//! Block id (indexed from zero).
uint32_t _blockId = kUnassignedId;
//! Block flags, see `Flags`.
uint32_t _flags = 0;
RABlockFlags _flags = RABlockFlags::kNone;
//! First `BaseNode` of this block (inclusive).
BaseNode* _first = nullptr;
@@ -119,30 +118,24 @@ public:
//! Block successors.
RABlocks _successors {};
enum LiveType : uint32_t {
kLiveIn = 0,
kLiveOut = 1,
kLiveGen = 2,
kLiveKill = 3,
kLiveCount = 4
};
//! Liveness in/out/use/kill.
ZoneBitVector _liveBits[kLiveCount] {};
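// The four bit-vectors above feed the standard liveness data-flow equations
// (shown only as a sketch; the exact iteration used by the pass is not part of
// this hunk):
//   liveOut(B) = union of liveIn(S) over all successors S of B
//   liveIn(B)  = liveGen(B) | (liveOut(B) & ~liveKill(B))
// iterated over the blocks until a fixed point is reached.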
//! Shared assignment it or `Globals::kInvalidId` if this block doesn't
//! have shared assignment. See `RASharedAssignment` for more details.
//! Shared assignment id or `Globals::kInvalidId` if this block doesn't have a shared assignment.
//! See \ref RASharedAssignment for more details.
uint32_t _sharedAssignmentId = Globals::kInvalidId;
//! Scratch registers that cannot be allocated upon block entry.
uint32_t _entryScratchGpRegs = 0;
RegMask _entryScratchGpRegs = 0;
//! Scratch registers used at exit, by a terminator instruction.
uint32_t _exitScratchGpRegs = 0;
RegMask _exitScratchGpRegs = 0;
//! Register assignment (PhysToWork) on entry.
PhysToWorkMap* _entryPhysToWorkMap = nullptr;
//! Register assignment (WorkToPhys) on entry.
WorkToPhysMap* _entryWorkToPhysMap = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
@@ -158,37 +151,43 @@ public:
inline ZoneAllocator* allocator() const noexcept;
inline uint32_t blockId() const noexcept { return _blockId; }
inline uint32_t flags() const noexcept { return _flags; }
inline RABlockFlags flags() const noexcept { return _flags; }
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
inline bool hasFlag(RABlockFlags flag) const noexcept { return Support::test(_flags, flag); }
inline void addFlags(RABlockFlags flags) noexcept { _flags |= flags; }
inline bool isAssigned() const noexcept { return _blockId != kUnassignedId; }
inline bool isConstructed() const noexcept { return hasFlag(kFlagIsConstructed); }
inline bool isReachable() const noexcept { return hasFlag(kFlagIsReachable); }
inline bool isTargetable() const noexcept { return hasFlag(kFlagIsTargetable); }
inline bool isAllocated() const noexcept { return hasFlag(kFlagIsAllocated); }
inline bool isFuncExit() const noexcept { return hasFlag(kFlagIsFuncExit); }
inline bool isConstructed() const noexcept { return hasFlag(RABlockFlags::kIsConstructed); }
inline bool isReachable() const noexcept { return hasFlag(RABlockFlags::kIsReachable); }
inline bool isTargetable() const noexcept { return hasFlag(RABlockFlags::kIsTargetable); }
inline bool isAllocated() const noexcept { return hasFlag(RABlockFlags::kIsAllocated); }
inline bool isFuncExit() const noexcept { return hasFlag(RABlockFlags::kIsFuncExit); }
inline bool hasTerminator() const noexcept { return hasFlag(RABlockFlags::kHasTerminator); }
inline bool hasConsecutive() const noexcept { return hasFlag(RABlockFlags::kHasConsecutive); }
inline bool hasJumpTable() const noexcept { return hasFlag(RABlockFlags::kHasJumpTable); }
inline void makeConstructed(const RARegsStats& regStats) noexcept {
_flags |= kFlagIsConstructed;
_flags |= RABlockFlags::kIsConstructed;
_regsStats.combineWith(regStats);
}
inline void makeReachable() noexcept { _flags |= kFlagIsReachable; }
inline void makeTargetable() noexcept { _flags |= kFlagIsTargetable; }
inline void makeAllocated() noexcept { _flags |= kFlagIsAllocated; }
inline void makeReachable() noexcept { _flags |= RABlockFlags::kIsReachable; }
inline void makeTargetable() noexcept { _flags |= RABlockFlags::kIsTargetable; }
inline void makeAllocated() noexcept { _flags |= RABlockFlags::kIsAllocated; }
inline const RARegsStats& regsStats() const noexcept { return _regsStats; }
inline bool hasTerminator() const noexcept { return hasFlag(kFlagHasTerminator); }
inline bool hasConsecutive() const noexcept { return hasFlag(kFlagHasConsecutive); }
inline bool hasJumpTable() const noexcept { return hasFlag(kFlagHasJumpTable); }
inline bool hasPredecessors() const noexcept { return !_predecessors.empty(); }
inline bool hasSuccessors() const noexcept { return !_successors.empty(); }
inline bool hasSuccessor(RABlock* block) noexcept {
if (block->_predecessors.size() < _successors.size())
return block->_predecessors.contains(this);
else
return _successors.contains(block);
}
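// Note on hasSuccessor() above: probing the smaller of the two vectors keeps the
// membership test proportional to min(|predecessors|, |successors|), which helps
// for blocks with many edges (e.g. targets of jump tables).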
inline const RABlocks& predecessors() const noexcept { return _predecessors; }
inline const RABlocks& successors() const noexcept { return _successors; }
@@ -206,11 +205,11 @@ public:
inline uint32_t povOrder() const noexcept { return _povOrder; }
inline uint32_t entryScratchGpRegs() const noexcept;
inline uint32_t exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; }
inline RegMask entryScratchGpRegs() const noexcept;
inline RegMask exitScratchGpRegs() const noexcept { return _exitScratchGpRegs; }
inline void addEntryScratchGpRegs(uint32_t regMask) noexcept { _entryScratchGpRegs |= regMask; }
inline void addExitScratchGpRegs(uint32_t regMask) noexcept { _exitScratchGpRegs |= regMask; }
inline void addEntryScratchGpRegs(RegMask regMask) noexcept { _entryScratchGpRegs |= regMask; }
inline void addExitScratchGpRegs(RegMask regMask) noexcept { _exitScratchGpRegs |= regMask; }
inline bool hasSharedAssignmentId() const noexcept { return _sharedAssignmentId != Globals::kInvalidId; }
inline uint32_t sharedAssignmentId() const noexcept { return _sharedAssignmentId; }
@@ -261,11 +260,9 @@ public:
//! \name Utilities
//! \{
//! Adds a successor to this block, and predecessor to `successor`, making
//! connection on both sides.
//! Adds a successor to this block, and a predecessor to `successor`, making the connection on both sides.
//!
//! This API must be used to manage successors and predecessors, never manage
//! it manually.
//! This API must be used to manage successors and predecessors, never manage them manually.
Error appendSuccessor(RABlock* successor) noexcept;
//! Similar to `appendSuccessor()`, but prepends instead of appending.
@@ -276,19 +273,18 @@ public:
//! \}
};
// ============================================================================
// [asmjit::RAInst]
// ============================================================================
//! Register allocator's data associated with each `InstNode`.
class RAInst {
public:
ASMJIT_NONCOPYABLE(RAInst)
//! \name Members
//! \{
//! Parent block.
RABlock* _block;
//! Instruction flags.
uint32_t _flags;
//! Aggregated RATiedFlags from all operands & instruction specific flags.
RATiedFlags _flags;
//! Total count of RATiedReg's.
uint32_t _tiedTotal;
//! Index of RATiedReg's per register group.
@@ -304,14 +300,12 @@ public:
//! Tied registers.
RATiedReg _tiedRegs[1];
enum Flags : uint32_t {
kFlagIsTransformable = 0x80000000u
};
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE RAInst(RABlock* block, uint32_t flags, uint32_t tiedTotal, const RARegMask& clobberedRegs) noexcept {
inline RAInst(RABlock* block, RATiedFlags flags, uint32_t tiedTotal, const RARegMask& clobberedRegs) noexcept {
_block = block;
_flags = flags;
_tiedTotal = tiedTotal;
@@ -328,18 +322,18 @@ public:
//! \{
//! Returns the instruction flags.
inline uint32_t flags() const noexcept { return _flags; }
inline RATiedFlags flags() const noexcept { return _flags; }
//! Tests whether the instruction has flag `flag`.
inline bool hasFlag(uint32_t flag) const noexcept { return (_flags & flag) != 0; }
inline bool hasFlag(RATiedFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Replaces the existing instruction flags with `flags`.
inline void setFlags(uint32_t flags) noexcept { _flags = flags; }
inline void setFlags(RATiedFlags flags) noexcept { _flags = flags; }
//! Adds instruction `flags` to this RAInst.
inline void addFlags(uint32_t flags) noexcept { _flags |= flags; }
inline void addFlags(RATiedFlags flags) noexcept { _flags |= flags; }
//! Clears instruction `flags` from this RAInst.
inline void clearFlags(uint32_t flags) noexcept { _flags &= ~flags; }
inline void clearFlags(RATiedFlags flags) noexcept { _flags &= ~flags; }
//! Tests whether this instruction can be transformed to another instruction if necessary.
inline bool isTransformable() const noexcept { return hasFlag(kFlagIsTransformable); }
inline bool isTransformable() const noexcept { return hasFlag(RATiedFlags::kInst_IsTransformable); }
//! Returns the associated block with this RAInst.
inline RABlock* block() const noexcept { return _block; }
@@ -347,12 +341,12 @@ public:
//! Returns tied registers (all).
inline RATiedReg* tiedRegs() const noexcept { return const_cast<RATiedReg*>(_tiedRegs); }
//! Returns tied registers for a given `group`.
inline RATiedReg* tiedRegs(uint32_t group) const noexcept { return const_cast<RATiedReg*>(_tiedRegs) + _tiedIndex.get(group); }
inline RATiedReg* tiedRegs(RegGroup group) const noexcept { return const_cast<RATiedReg*>(_tiedRegs) + _tiedIndex.get(group); }
//! Returns count of all tied registers.
inline uint32_t tiedCount() const noexcept { return _tiedTotal; }
//! Returns count of tied registers of a given `group`.
inline uint32_t tiedCount(uint32_t group) const noexcept { return _tiedCount[group]; }
inline uint32_t tiedCount(RegGroup group) const noexcept { return _tiedCount[group]; }
//! Returns `RATiedReg` at the given `index`.
inline RATiedReg* tiedAt(uint32_t index) const noexcept {
@@ -361,8 +355,8 @@ public:
}
//! Returns `RATiedReg` at the given `index` of the given register `group`.
inline RATiedReg* tiedOf(uint32_t group, uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _tiedCount._regs[group]);
inline RATiedReg* tiedOf(RegGroup group, uint32_t index) const noexcept {
ASMJIT_ASSERT(index < _tiedCount.get(group));
return tiedRegs(group) + index;
}
@@ -381,20 +375,18 @@ public:
//! \}
};
// ============================================================================
// [asmjit::RAInstBuilder]
// ============================================================================
//! A helper class that is used to build an array of RATiedReg items that are
//! then copied to `RAInst`.
//! A helper class that is used to build an array of RATiedReg items that are then copied to `RAInst`.
class RAInstBuilder {
public:
ASMJIT_NONCOPYABLE(RAInstBuilder)
//! \name Members
//! \{
//! Flags combined from all RATiedReg's.
uint32_t _aggregatedFlags;
RATiedFlags _aggregatedFlags;
//! Flags that will be cleared before storing the aggregated flags to `RAInst`.
uint32_t _forbiddenFlags;
RATiedFlags _forbiddenFlags;
RARegCount _count;
RARegsStats _stats;
@@ -406,6 +398,8 @@ public:
//! Array of temporary tied registers.
RATiedReg _tiedRegs[128];
//! \}
//! \name Construction & Destruction
//! \{
@@ -413,8 +407,8 @@ public:
inline void init() noexcept { reset(); }
inline void reset() noexcept {
_aggregatedFlags = 0;
_forbiddenFlags = 0;
_aggregatedFlags = RATiedFlags::kNone;
_forbiddenFlags = RATiedFlags::kNone;
_count.reset();
_stats.reset();
_used.reset();
@@ -427,11 +421,11 @@ public:
//! \name Accessors
//! \{
inline uint32_t aggregatedFlags() const noexcept { return _aggregatedFlags; }
inline uint32_t forbiddenFlags() const noexcept { return _forbiddenFlags; }
inline RATiedFlags aggregatedFlags() const noexcept { return _aggregatedFlags; }
inline RATiedFlags forbiddenFlags() const noexcept { return _forbiddenFlags; }
inline void addAggregatedFlags(uint32_t flags) noexcept { _aggregatedFlags |= flags; }
inline void addForbiddenFlags(uint32_t flags) noexcept { _forbiddenFlags |= flags; }
inline void addAggregatedFlags(RATiedFlags flags) noexcept { _aggregatedFlags |= flags; }
inline void addForbiddenFlags(RATiedFlags flags) noexcept { _forbiddenFlags |= flags; }
//! Returns the number of tied registers added to the builder.
inline uint32_t tiedRegCount() const noexcept { return uint32_t((size_t)(_cur - _tiedRegs)); }
@@ -459,19 +453,26 @@ public:
//! \name Utilities
//! \{
Error add(RAWorkReg* workReg, uint32_t flags, uint32_t allocable, uint32_t useId, uint32_t useRewriteMask, uint32_t outId, uint32_t outRewriteMask, uint32_t rmSize = 0) noexcept {
uint32_t group = workReg->group();
Error add(
RAWorkReg* workReg,
RATiedFlags flags,
RegMask useRegMask, uint32_t useId, uint32_t useRewriteMask,
RegMask outRegMask, uint32_t outId, uint32_t outRewriteMask,
uint32_t rmSize = 0,
uint32_t consecutiveParent = Globals::kInvalidId) noexcept {
RegGroup group = workReg->group();
RATiedReg* tiedReg = workReg->tiedReg();
if (useId != BaseReg::kIdBad) {
_stats.makeFixed(group);
_used[group] |= Support::bitMask(useId);
flags |= RATiedReg::kUseFixed;
flags |= RATiedFlags::kUseFixed;
}
if (outId != BaseReg::kIdBad) {
_clobbered[group] |= Support::bitMask(outId);
flags |= RATiedReg::kOutFixed;
flags |= RATiedFlags::kOutFixed;
}
_aggregatedFlags |= flags;
@@ -482,13 +483,19 @@ public:
ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
tiedReg = _cur++;
tiedReg->init(workReg->workId(), flags, allocable, useId, useRewriteMask, outId, outRewriteMask, rmSize);
tiedReg->init(workReg->workId(), flags, useRegMask, useId, useRewriteMask, outRegMask, outId, outRewriteMask, rmSize, consecutiveParent);
workReg->setTiedReg(tiedReg);
_count.add(group);
return kErrorOk;
}
else {
if (consecutiveParent != tiedReg->consecutiveParent()) {
if (tiedReg->consecutiveParent() != Globals::kInvalidId)
return DebugUtils::errored(kErrorInvalidState);
tiedReg->_consecutiveParent = consecutiveParent;
}
if (useId != BaseReg::kIdBad) {
if (ASMJIT_UNLIKELY(tiedReg->hasUseId()))
return DebugUtils::errored(kErrorOverlappedRegs);
@@ -503,8 +510,9 @@ public:
tiedReg->addRefCount();
tiedReg->addFlags(flags);
tiedReg->_allocableRegs &= allocable;
tiedReg->_useRegMask &= useRegMask;
tiedReg->_useRewriteMask |= useRewriteMask;
tiedReg->_outRegMask &= outRegMask;
tiedReg->_outRewriteMask |= outRewriteMask;
tiedReg->_rmSize = uint8_t(Support::max<uint32_t>(tiedReg->rmSize(), rmSize));
return kErrorOk;
@@ -514,9 +522,9 @@ public:
Error addCallArg(RAWorkReg* workReg, uint32_t useId) noexcept {
ASMJIT_ASSERT(useId != BaseReg::kIdBad);
uint32_t flags = RATiedReg::kUse | RATiedReg::kRead | RATiedReg::kUseFixed;
uint32_t group = workReg->group();
uint32_t allocable = Support::bitMask(useId);
RATiedFlags flags = RATiedFlags::kUse | RATiedFlags::kRead | RATiedFlags::kUseFixed;
RegGroup group = workReg->group();
RegMask allocable = Support::bitMask(useId);
_aggregatedFlags |= flags;
_used[group] |= allocable;
@@ -529,7 +537,7 @@ public:
ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
tiedReg = _cur++;
tiedReg->init(workReg->workId(), flags, allocable, useId, 0, BaseReg::kIdBad, 0);
tiedReg->init(workReg->workId(), flags, allocable, useId, 0, allocable, BaseReg::kIdBad, 0);
workReg->setTiedReg(tiedReg);
_count.add(group);
@@ -537,12 +545,12 @@ public:
}
else {
if (tiedReg->hasUseId()) {
flags |= RATiedReg::kDuplicate;
tiedReg->_allocableRegs |= allocable;
flags |= RATiedFlags::kDuplicate;
tiedReg->_useRegMask |= allocable;
}
else {
tiedReg->setUseId(useId);
tiedReg->_allocableRegs &= allocable;
tiedReg->_useRegMask &= allocable;
}
tiedReg->addRefCount();
@@ -554,12 +562,12 @@ public:
Error addCallRet(RAWorkReg* workReg, uint32_t outId) noexcept {
ASMJIT_ASSERT(outId != BaseReg::kIdBad);
uint32_t flags = RATiedReg::kOut | RATiedReg::kWrite | RATiedReg::kOutFixed;
uint32_t group = workReg->group();
uint32_t allocable = Support::bitMask(outId);
RATiedFlags flags = RATiedFlags::kOut | RATiedFlags::kWrite | RATiedFlags::kOutFixed;
RegGroup group = workReg->group();
RegMask outRegs = Support::bitMask(outId);
_aggregatedFlags |= flags;
_used[group] |= allocable;
_used[group] |= outRegs;
_stats.makeFixed(group);
_stats.makeUsed(group);
@@ -569,7 +577,7 @@ public:
ASMJIT_ASSERT(tiedRegCount() < ASMJIT_ARRAY_SIZE(_tiedRegs));
tiedReg = _cur++;
tiedReg->init(workReg->workId(), flags, allocable, BaseReg::kIdBad, 0, outId, 0);
tiedReg->init(workReg->workId(), flags, Support::allOnes<RegMask>(), BaseReg::kIdBad, 0, outRegs, outId, 0);
workReg->setTiedReg(tiedReg);
_count.add(group);
@@ -589,21 +597,21 @@ public:
//! \}
};
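For orientation, a hedged sketch of how a CFG builder might feed one operand into the builder through the new add() signature above; the masks, flags, and the `workReg` variable are placeholders chosen for illustration, not code taken from this commit:
RAInstBuilder ib;
ib.init();
// One read+write operand, allocable to any register in a 16-register mask,
// no fixed use/out id and no consecutive-register constraint (all values are
// illustrative only).
RATiedFlags flags = RATiedFlags::kUse | RATiedFlags::kRead |
                    RATiedFlags::kOut | RATiedFlags::kWrite;
ASMJIT_PROPAGATE(ib.add(
  workReg,                      // RAWorkReg*, e.g. from virtIndexAsWorkReg().
  flags,
  0xFFFFu, BaseReg::kIdBad, 0,  // useRegMask, useId, useRewriteMask.
  0xFFFFu, BaseReg::kIdBad, 0,  // outRegMask, outId, outRewriteMask.
  0,                            // rmSize.
  Globals::kInvalidId));        // consecutiveParent.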
// ============================================================================
// [asmjit::RASharedAssignment]
// ============================================================================
//! Intersection of multiple register assignments.
//!
//! See \ref RAAssignment for more information about register assignments.
class RASharedAssignment {
public:
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
//! Bit-mask of registers that cannot be used upon a block entry, for each
//! block that has this shared assignment. Scratch registers can come from
//! ISA limits (like jecx/loop instructions on x86) or because the registers
//! are used by jump/branch instruction that uses registers to perform an
//! indirect jump.
uint32_t _entryScratchGpRegs = 0;
//! \name Members
//! \{
//! Bit-mask of registers that cannot be used upon a block entry, for each block that has this shared assignment.
//! Scratch registers can come from ISA limits (like jecxz/loop instructions on x86) or because the registers are
//! used by a jump/branch instruction that uses registers to perform an indirect jump.
RegMask _entryScratchGpRegs = 0;
//! Union of all live-in registers.
ZoneBitVector _liveIn {};
//! Register assignment (PhysToWork).
@@ -611,30 +619,28 @@ public:
//! Register assignment (WorkToPhys).
WorkToPhysMap* _workToPhysMap = nullptr;
//! Most likely never called as we initialize a vector of shared assignments to zero.
inline RASharedAssignment() noexcept {}
//! \}
inline uint32_t entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; }
inline void addEntryScratchGpRegs(uint32_t mask) noexcept { _entryScratchGpRegs |= mask; }
//! \name Accessors
//! \{
inline bool empty() const noexcept { return _physToWorkMap == nullptr; }
inline RegMask entryScratchGpRegs() const noexcept { return _entryScratchGpRegs; }
inline void addEntryScratchGpRegs(RegMask mask) noexcept { _entryScratchGpRegs |= mask; }
inline const ZoneBitVector& liveIn() const noexcept { return _liveIn; }
inline PhysToWorkMap* physToWorkMap() const noexcept { return _physToWorkMap; }
inline WorkToPhysMap* workToPhysMap() const noexcept { return _workToPhysMap; }
inline bool empty() const noexcept {
return _physToWorkMap == nullptr;
}
inline void assignMaps(PhysToWorkMap* physToWorkMap, WorkToPhysMap* workToPhysMap) noexcept {
_physToWorkMap = physToWorkMap;
_workToPhysMap = workToPhysMap;
}
};
// ============================================================================
// [asmjit::BaseRAPass]
// ============================================================================
//! \}
};
//! Register allocation pass used by `BaseCompiler`.
class BaseRAPass : public FuncPass {
@@ -642,13 +648,16 @@ public:
ASMJIT_NONCOPYABLE(BaseRAPass)
typedef FuncPass Base;
enum Weights : uint32_t {
enum : uint32_t {
kCallArgWeight = 80
};
typedef RAAssignment::PhysToWorkMap PhysToWorkMap;
typedef RAAssignment::WorkToPhysMap WorkToPhysMap;
//! \name Members
//! \{
//! Allocator that uses zone passed to `runOnFunction()`.
ZoneAllocator _allocator {};
//! Emit helper.
@@ -656,10 +665,10 @@ public:
//! Logger, disabled if null.
Logger* _logger = nullptr;
//! Debug logger, non-null only if `kOptionDebugPasses` option is set.
Logger* _debugLogger = nullptr;
//! Logger flags.
uint32_t _loggerFlags = 0;
//! Format options, copied from Logger, or zeroed if there is no logger.
FormatOptions _formatOptions {};
//! Diagnostic options, copied from Emitter, or zeroed if there is no logger.
DiagnosticOptions _diagnosticOptions {};
//! Function being processed.
FuncNode* _func = nullptr;
@@ -680,7 +689,7 @@ public:
//! Number of created blocks (internal).
uint32_t _createdBlockCount = 0;
//! SharedState blocks.
//! Shared assignment blocks.
ZoneVector<RASharedAssignment> _sharedAssignments {};
//! Timestamp generator (incremental).
@@ -695,7 +704,7 @@ public:
//! Total number of physical registers.
uint32_t _physRegTotal = 0;
//! Indexes of possible scratch registers that can be selected if necessary.
uint8_t _scratchRegIndexes[2] {};
Support::Array<uint8_t, 2> _scratchRegIndexes {};
//! Registers available for allocation.
RARegMask _availableRegs = RARegMask();
@@ -707,14 +716,14 @@ public:
//! Work registers (registers used by the function).
RAWorkRegs _workRegs;
//! Work registers per register group.
RAWorkRegs _workRegsOfGroup[BaseReg::kGroupVirt];
Support::Array<RAWorkRegs, Globals::kNumVirtGroups> _workRegsOfGroup;
//! Register allocation strategy per register group.
RAStrategy _strategy[BaseReg::kGroupVirt];
Support::Array<RAStrategy, Globals::kNumVirtGroups> _strategy;
//! Global max live-count (from all blocks) per register group.
RALiveCount _globalMaxLiveCount = RALiveCount();
//! Global live spans per register group.
LiveRegSpans* _globalLiveSpans[BaseReg::kGroupVirt] {};
Support::Array<LiveRegSpans*, Globals::kNumVirtGroups> _globalLiveSpans {};
//! Temporary stack slot.
Operand _temporaryMem = Operand();
@@ -734,7 +743,9 @@ public:
//! Temporary string builder used to format comments.
StringTmp<80> _tmpString;
//! \name Construction & Reset
//! \}
//! \name Construction & Destruction
//! \{
BaseRAPass() noexcept;
@@ -747,8 +758,14 @@ public:
//! Returns \ref Logger passed to \ref runOnFunction().
inline Logger* logger() const noexcept { return _logger; }
//! Returns \ref Logger passed to \ref runOnFunction() or null if `kOptionDebugPasses` is not set.
inline Logger* debugLogger() const noexcept { return _debugLogger; }
//! Returns either a valid logger if the given `option` is set and logging is enabled, or nullptr.
inline Logger* getLoggerIf(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option) ? _logger : nullptr; }
//! Returns whether the diagnostic `option` is enabled.
//!
//! \note Returns false if there is no logger (as diagnostics without logging make no sense).
inline bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
//! Returns \ref Zone passed to \ref runOnFunction().
inline Zone* zone() const noexcept { return _allocator.zone(); }
@@ -778,7 +795,7 @@ public:
//! \name Utilities
//! \{
inline void makeUnavailable(uint32_t group, uint32_t regId) noexcept {
inline void makeUnavailable(RegGroup group, uint32_t regId) noexcept {
_availableRegs[group] &= ~Support::bitMask(regId);
_availableRegCount[group]--;
}
@@ -829,12 +846,11 @@ public:
//! Returns the count of reachable basic blocks (returns size of `_pov` array).
inline uint32_t reachableBlockCount() const noexcept { return _pov.size(); }
//! Tests whether the CFG has dangling blocks - these were created by `newBlock()`,
//! but not added to CFG through `addBlocks()`. If `true` is returned and the
//! CFG is constructed it means that something is missing and it's incomplete.
//! Tests whether the CFG has dangling blocks - these were created by `newBlock()`, but not added to the CFG through
//! `addBlocks()`. If `true` is returned and the CFG is constructed, it means that something is missing and the CFG
//! is incomplete.
//!
//! \note This is only used to check if the number of created blocks matches
//! the number of added blocks.
//! \note This is only used to check if the number of created blocks matches the number of added blocks.
inline bool hasDanglingBlocks() const noexcept { return _createdBlockCount != blockCount(); }
//! Gets the next timestamp to be used to mark CFG blocks.
@@ -842,31 +858,29 @@ public:
//! Creates a new `RABlock` instance.
//!
//! \note New blocks don't have ID assigned until they are added to the block
//! array by calling `addBlock()`.
//! \note New blocks don't have an ID assigned until they are added to the block array by calling `addBlock()`.
RABlock* newBlock(BaseNode* initialNode = nullptr) noexcept;
//! Tries to find a neighboring LabelNode (without going through code) that is
//! already connected with `RABlock`. If no label is found then a new RABlock
//! is created and assigned to all possible labels in a backward direction.
//! Tries to find a neighboring LabelNode (without going through code) that is already connected with `RABlock`.
//! If no label is found then a new RABlock is created and assigned to all possible labels in a backward direction.
RABlock* newBlockOrExistingAt(LabelNode* cbLabel, BaseNode** stoppedAt = nullptr) noexcept;
//! Adds the given `block` to the block list and assign it a unique block id.
Error addBlock(RABlock* block) noexcept;
inline Error addExitBlock(RABlock* block) noexcept {
block->addFlags(RABlock::kFlagIsFuncExit);
block->addFlags(RABlockFlags::kIsFuncExit);
return _exits.append(allocator(), block);
}
ASMJIT_INLINE RAInst* newRAInst(RABlock* block, uint32_t flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept {
ASMJIT_FORCE_INLINE RAInst* newRAInst(RABlock* block, RATiedFlags flags, uint32_t tiedRegCount, const RARegMask& clobberedRegs) noexcept {
void* p = zone()->alloc(RAInst::sizeOf(tiedRegCount));
if (ASMJIT_UNLIKELY(!p))
return nullptr;
return new(p) RAInst(block, flags, tiedRegCount, clobberedRegs);
}
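// Note: RAInst ends with the flexible `RATiedReg _tiedRegs[1]` member, so
// RAInst::sizeOf(tiedRegCount) sizes the zone allocation for the whole array and
// the placement-new above only constructs the header; the tied registers are
// copied in later by assignRAInst().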
ASMJIT_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept {
ASMJIT_FORCE_INLINE Error assignRAInst(BaseNode* node, RABlock* block, RAInstBuilder& ib) noexcept {
uint32_t tiedRegCount = ib.tiedRegCount();
RAInst* raInst = newRAInst(block, ib.aggregatedFlags(), tiedRegCount, ib._clobbered);
@@ -874,7 +888,7 @@ public:
return DebugUtils::errored(kErrorOutOfMemory);
RARegIndex index;
uint32_t flagsFilter = ~ib.forbiddenFlags();
RATiedFlags flagsFilter = ~ib.forbiddenFlags();
index.buildIndexes(ib._count);
raInst->_tiedIndex = index;
@@ -885,15 +899,15 @@ public:
RAWorkReg* workReg = workRegById(tiedReg->workId());
workReg->resetTiedReg();
uint32_t group = workReg->group();
RegGroup group = workReg->group();
if (tiedReg->hasUseId()) {
block->addFlags(RABlock::kFlagHasFixedRegs);
block->addFlags(RABlockFlags::kHasFixedRegs);
raInst->_usedRegs[group] |= Support::bitMask(tiedReg->useId());
}
if (tiedReg->hasOutId()) {
block->addFlags(RABlock::kFlagHasFixedRegs);
block->addFlags(RABlockFlags::kHasFixedRegs);
}
RATiedReg& dst = raInst->_tiedRegs[index[group]++];
@@ -901,7 +915,7 @@ public:
dst._flags &= flagsFilter;
if (!tiedReg->isDuplicate())
dst._allocableRegs &= ~ib._used[group];
dst._useRegMask &= ~ib._used[group];
}
node->setPassData<RAInst>(raInst);
@@ -915,18 +929,15 @@ public:
//! Traverse the whole function and do the following:
//!
//! 1. Construct CFG (represented by `RABlock`) by populating `_blocks` and
//! `_exits`. Blocks describe the control flow of the function and contain
//! some additional information that is used by the register allocator.
//! 1. Construct CFG (represented by `RABlock`) by populating `_blocks` and `_exits`. Blocks describe the control
//! flow of the function and contain some additional information that is used by the register allocator.
//!
//! 2. Remove unreachable code immediately. This is not strictly necessary
//! for BaseCompiler itself as the register allocator cannot reach such
//! nodes, but keeping instructions that use virtual registers would fail
//! during instruction encoding phase (Assembler).
//! 2. Remove unreachable code immediately. This is not strictly necessary for BaseCompiler itself as the register
//!    allocator cannot reach such nodes, but keeping instructions that use virtual registers would make the
//!    instruction encoding phase (Assembler) fail.
//!
//! 3. `RAInst` is created for each `InstNode` or compatible. It contains
//! information that is essential for further analysis and register
//! allocation.
//! 3. `RAInst` is created for each `InstNode` or compatible. It contains information that is essential for further
//! analysis and register allocation.
//!
//! Use `RACFGBuilderT` template that provides the necessary boilerplate.
virtual Error buildCFG() noexcept = 0;
@@ -940,7 +951,7 @@ public:
//! \{
//! Constructs CFG views (only POV at the moment).
Error buildViews() noexcept;
Error buildCFGViews() noexcept;
//! \}
@@ -948,13 +959,11 @@ public:
//! \{
// Terminology:
// - A node `X` dominates a node `Z` if any path from the entry point to
// `Z` has to go through `X`.
// - A node `Z` post-dominates a node `X` if any path from `X` to the end
// of the graph has to go through `Z`.
// - A node `X` dominates a node `Z` if any path from the entry point to `Z` has to go through `X`.
// - A node `Z` post-dominates a node `X` if any path from `X` to the end of the graph has to go through `Z`.
//! Constructs a dominator-tree from CFG.
Error buildDominators() noexcept;
Error buildCFGDominators() noexcept;
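// Tiny example of the terminology above on a diamond-shaped CFG:
//
//        A         A dominates B, C and D (every path from the entry goes
//       / \        through A); D post-dominates B and C (every path from them
//      B   C       to the end of the graph goes through D), assuming D is the
//       \ /        only exit.
//        D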
bool _strictlyDominates(const RABlock* a, const RABlock* b) const noexcept;
const RABlock* _nearestCommonDominator(const RABlock* a, const RABlock* b) const noexcept;
@@ -974,17 +983,15 @@ public:
//! \name CFG - Utilities
//! \{
Error removeUnreachableBlocks() noexcept;
Error removeUnreachableCode() noexcept;
//! Returns `node` or some node after that is ideal for beginning a new block.
//! This function is mostly used after a conditional or unconditional jump to
//! select the successor node. In some cases the next node could be a label,
//! Returns `node` or some node after that is ideal for beginning a new block. This function is mostly used after
//! a conditional or unconditional jump to select the successor node. In some cases the next node could be a label,
//! which means it could already have a block assigned.
BaseNode* findSuccessorStartingAt(BaseNode* node) noexcept;
//! Returns `true` of the `node` can flow to `target` without reaching code
//! nor data. It's used to eliminate jumps to labels that are next right to
//! them.
//! Returns `true` if the `node` can flow to `target` without reaching code or data. It's used to eliminate jumps
//! to labels that are right next to them.
bool isNextTo(BaseNode* node, BaseNode* target) noexcept;
//! \}
@@ -994,25 +1001,25 @@ public:
//! Returns a native size of the general-purpose register of the target architecture.
inline uint32_t registerSize() const noexcept { return _sp.size(); }
inline uint32_t availableRegCount(uint32_t group) const noexcept { return _availableRegCount[group]; }
inline uint32_t availableRegCount(RegGroup group) const noexcept { return _availableRegCount[group]; }
inline RAWorkReg* workRegById(uint32_t workId) const noexcept { return _workRegs[workId]; }
inline RAWorkRegs& workRegs() noexcept { return _workRegs; }
inline RAWorkRegs& workRegs(uint32_t group) noexcept { return _workRegsOfGroup[group]; }
inline RAWorkRegs& workRegs(RegGroup group) noexcept { return _workRegsOfGroup[group]; }
inline const RAWorkRegs& workRegs() const noexcept { return _workRegs; }
inline const RAWorkRegs& workRegs(uint32_t group) const noexcept { return _workRegsOfGroup[group]; }
inline const RAWorkRegs& workRegs(RegGroup group) const noexcept { return _workRegsOfGroup[group]; }
inline uint32_t workRegCount() const noexcept { return _workRegs.size(); }
inline uint32_t workRegCount(uint32_t group) const noexcept { return _workRegsOfGroup[group].size(); }
inline uint32_t workRegCount(RegGroup group) const noexcept { return _workRegsOfGroup[group].size(); }
inline void _buildPhysIndex() noexcept {
_physRegIndex.buildIndexes(_physRegCount);
_physRegTotal = uint32_t(_physRegIndex[BaseReg::kGroupVirt - 1]) +
uint32_t(_physRegCount[BaseReg::kGroupVirt - 1]) ;
_physRegTotal = uint32_t(_physRegIndex[RegGroup::kMaxVirt]) +
uint32_t(_physRegCount[RegGroup::kMaxVirt]) ;
}
inline uint32_t physRegIndex(uint32_t group) const noexcept { return _physRegIndex[group]; }
inline uint32_t physRegIndex(RegGroup group) const noexcept { return _physRegIndex[group]; }
inline uint32_t physRegTotal() const noexcept { return _physRegTotal; }
Error _asWorkReg(VirtReg* vReg, RAWorkReg** out) noexcept;
@@ -1024,7 +1031,7 @@ public:
return *out ? kErrorOk : _asWorkReg(vReg, out);
}
inline Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept {
ASMJIT_FORCE_INLINE Error virtIndexAsWorkReg(uint32_t vIndex, RAWorkReg** out) noexcept {
const ZoneVector<VirtReg*>& virtRegs = cc()->virtRegs();
if (ASMJIT_UNLIKELY(vIndex >= virtRegs.size()))
return DebugUtils::errored(kErrorInvalidVirtId);
@@ -1045,7 +1052,10 @@ public:
inline BaseMem workRegAsMem(RAWorkReg* workReg) noexcept {
getOrCreateStackSlot(workReg);
return BaseMem(BaseMem::Decomposed { _sp.type(), workReg->virtId(), BaseReg::kTypeNone, 0, 0, 0, BaseMem::kSignatureMemRegHomeFlag });
return BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(_sp.type()) |
OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag),
workReg->virtId(), 0, 0);
}
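// The composed signature above describes a memory operand whose base register
// type comes from the stack pointer (`_sp.type()`), whose base id is the virtual
// register id, and whose kMemRegHomeFlag marks it as addressing the register's
// home (spill) slot.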
WorkToPhysMap* newWorkToPhysMap() noexcept;
@@ -1085,7 +1095,7 @@ public:
//! Initializes data structures used for global live spans.
Error initGlobalLiveSpans() noexcept;
Error binPack(uint32_t group) noexcept;
Error binPack(RegGroup group) noexcept;
//! \}
@@ -1159,8 +1169,8 @@ public:
inline ZoneAllocator* RABlock::allocator() const noexcept { return _ra->allocator(); }
inline uint32_t RABlock::entryScratchGpRegs() const noexcept {
uint32_t regs = _entryScratchGpRegs;
inline RegMask RABlock::entryScratchGpRegs() const noexcept {
RegMask regs = _entryScratchGpRegs;
if (hasSharedAssignmentId())
regs = _ra->_sharedAssignments[_sharedAssignmentId].entryScratchGpRegs();
return regs;

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_COMPILER
@@ -29,9 +11,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::RAStackAllocator - Slots]
// ============================================================================
// RAStackAllocator - Slots
// ========================
RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32_t alignment, uint32_t flags) noexcept {
if (ASMJIT_UNLIKELY(_slots.willGrow(allocator(), 1) != kErrorOk))
@@ -55,9 +36,8 @@ RAStackSlot* RAStackAllocator::newSlot(uint32_t baseRegId, uint32_t size, uint32
return slot;
}
// ============================================================================
// [asmjit::RAStackAllocator - Utilities]
// ============================================================================
// RAStackAllocator - Utilities
// ============================
struct RAStackGap {
inline RAStackGap() noexcept
@@ -82,10 +62,9 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
// STEP 1:
//
// Update usage based on the size of the slot. We boost smaller slots in a way
// that 32-bit register has higher priority than a 128-bit register, however,
// if one 128-bit register is used 4 times more than some other 32-bit register
// it will overweight it.
// Update usage based on the size of the slot. We boost smaller slots so that a 32-bit register has higher
// priority than a 128-bit register; however, if one 128-bit register is used 4 times more than some other 32-bit
// register it will outweigh it.
for (RAStackSlot* slot : _slots) {
uint32_t alignment = slot->alignment();
ASMJIT_ASSERT(alignment > 0);
@@ -98,8 +77,8 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
else
weight = power;
// If overflown, which has less chance of winning a lottery, just use max
// possible weight. In such case it probably doesn't matter at all.
// If the weight overflows, which is less likely than winning a lottery, just use the maximum possible weight.
// In such a case it probably doesn't matter at all.
if (weight > 0xFFFFFFFFu)
weight = 0xFFFFFFFFu;
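// Purely invented numbers to illustrate the boost described above (the real
// `power` values are computed from the slot size and are not visible in this
// hunk): if a 4-byte slot had power 8 and a 16-byte slot had power 4, a 4-byte
// slot used once would weigh 8, while the 16-byte slot used 4 times (as in the
// comment above) would weigh 16 and therefore outweigh it.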
@@ -116,12 +95,11 @@ Error RAStackAllocator::calculateStackFrame() noexcept {
// STEP 3:
//
// Calculate offset of each slot. We start from the slot that has the highest
// weight and advance to slots with lower weight. It could look that offsets
// start from the first slot in our list and then simply increase, but it's
// not always the case as we also try to fill all gaps introduced by the fact
// that slots are sorted by weight and not by size & alignment, so when we need
// to align some slot we distribute the gap caused by the alignment to `gaps`.
// Calculate the offset of each slot. We start from the slot that has the highest weight and advance to slots with
// lower weight. It might look like offsets start from the first slot in our list and then simply increase, but
// that's not always the case, as we also try to fill all gaps introduced by the fact that slots are sorted by
// weight and not by size & alignment; so when we need to align some slot we distribute the gap caused by the
// alignment to `gaps`.
uint32_t offset = 0;
ZoneVector<RAStackGap> gaps[kSizeCount - 1];
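A small, self-contained sketch of the gap-distribution idea described above; this is generic C++ written for illustration under assumed bucket sizes, not the RAStackAllocator implementation:
#include <cstdint>
#include <vector>

// Free gaps bucketed by log2(size): 1, 2, 4, 8, 16 and 32 bytes (assumed layout).
using GapBuckets = std::vector<uint32_t>[6];

// Splits the unused bytes created by aligning a slot into naturally aligned
// power-of-two pieces and records their offsets so that smaller slots placed
// later can reuse them instead of growing the frame.
static void distributeGap(uint32_t offset, uint32_t size, GapBuckets& gaps) {
  while (size) {
    // Largest power of two that fits the remaining gap and keeps natural alignment.
    uint32_t chunk = 32;
    while (chunk > size || (offset & (chunk - 1)) != 0)
      chunk >>= 1;

    uint32_t index = 0;
    for (uint32_t v = chunk; v > 1; v >>= 1)
      index++;

    gaps[index].push_back(offset);
    offset += chunk;
    size -= chunk;
  }
}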

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_RASTACK_P_H_INCLUDED
#define ASMJIT_CORE_RASTACK_P_H_INCLUDED
@@ -35,16 +17,12 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_ra
//! \{
// ============================================================================
// [asmjit::RAStackSlot]
// ============================================================================
//! Stack slot.
struct RAStackSlot {
//! Stack slot flags.
//!
//! TODO: kFlagStackArg is not used by the current implementation, do we need to keep it?
enum Flags : uint32_t {
enum Flags : uint16_t {
//! Stack slot is register home slot.
kFlagRegHome = 0x0001u,
//! Stack slot position matches argument passed via stack.
@@ -55,6 +33,9 @@ struct RAStackSlot {
kNoArgIndex = 0xFF
};
//! \name Members
//! \{
//! Base register used to address the stack.
uint8_t _baseRegId;
//! Minimum alignment required by the slot.
@@ -71,6 +52,8 @@ struct RAStackSlot {
//! Stack offset, calculated by \ref RAStackAllocator::calculateStackFrame().
int32_t _offset;
//! \}
//! \name Accessors
//! \{
@@ -101,10 +84,6 @@ struct RAStackSlot {
typedef ZoneVector<RAStackSlot*> RAStackSlots;
// ============================================================================
// [asmjit::RAStackAllocator]
// ============================================================================
//! Stack allocator.
class RAStackAllocator {
public:
@@ -121,6 +100,9 @@ public:
kSizeCount = 7
};
//! \name Members
//! \{
//! Allocator used to allocate internal data.
ZoneAllocator* _allocator;
//! Count of bytes used by all slots.
@@ -132,7 +114,9 @@ public:
//! Stack slots vector.
RAStackSlots _slots;
//! \name Construction / Destruction
//! \}
//! \name Construction & Destruction
//! \{
inline RAStackAllocator() noexcept

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/string.h"
@@ -27,18 +9,16 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::String - Globals]
// ============================================================================
// String - Globals
// ================
static const char String_baseN[] = "0123456789ABCDEF";
constexpr size_t kMinAllocSize = 64;
constexpr size_t kMaxAllocSize = SIZE_MAX - Globals::kGrowThreshold;
// ============================================================================
// [asmjit::String]
// ============================================================================
// String - Clear & Reset
// ======================
Error String::reset() noexcept {
if (_type == kTypeLarge)
@@ -60,7 +40,10 @@ Error String::clear() noexcept {
return kErrorOk;
}
char* String::prepare(uint32_t op, size_t size) noexcept {
// String - Prepare
// ================
char* String::prepare(ModifyOp op, size_t size) noexcept {
char* curData;
size_t curSize;
size_t curCapacity;
@@ -76,7 +59,7 @@ char* String::prepare(uint32_t op, size_t size) noexcept {
curCapacity = kSSOCapacity;
}
if (op == kOpAssign) {
if (op == ModifyOp::kAssign) {
if (size > curCapacity) {
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(size >= kMaxAllocSize))
@@ -150,6 +133,9 @@ char* String::prepare(uint32_t op, size_t size) noexcept {
}
}
// String - Assign
// ===============
Error String::assign(const char* data, size_t size) noexcept {
char* dst = nullptr;
@@ -210,11 +196,10 @@ Error String::assign(const char* data, size_t size) noexcept {
return kErrorOk;
}
// ============================================================================
// [asmjit::String - Operations]
// ============================================================================
// String - Operations
// ===================
Error String::_opString(uint32_t op, const char* str, size_t size) noexcept {
Error String::_opString(ModifyOp op, const char* str, size_t size) noexcept {
if (size == SIZE_MAX)
size = str ? strlen(str) : size_t(0);
@@ -229,7 +214,7 @@ Error String::_opString(uint32_t op, const char* str, size_t size) noexcept {
return kErrorOk;
}
Error String::_opChar(uint32_t op, char c) noexcept {
Error String::_opChar(ModifyOp op, char c) noexcept {
char* p = prepare(op, 1);
if (!p)
return DebugUtils::errored(kErrorOutOfMemory);
@@ -238,7 +223,7 @@ Error String::_opChar(uint32_t op, char c) noexcept {
return kErrorOk;
}
Error String::_opChars(uint32_t op, char c, size_t n) noexcept {
Error String::_opChars(ModifyOp op, char c, size_t n) noexcept {
if (!n)
return kErrorOk;
@@ -255,7 +240,7 @@ Error String::padEnd(size_t n, char c) noexcept {
return n > size ? appendChars(c, n - size) : kErrorOk;
}
Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, uint32_t flags) noexcept {
Error String::_opNumber(ModifyOp op, uint64_t i, uint32_t base, size_t width, StringFormatFlags flags) noexcept {
if (base == 0)
base = 10;
@@ -265,24 +250,22 @@ Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, ui
uint64_t orig = i;
char sign = '\0';
// --------------------------------------------------------------------------
// [Sign]
// --------------------------------------------------------------------------
// Format Sign
// -----------
if ((flags & kFormatSigned) != 0 && int64_t(i) < 0) {
if (Support::test(flags, StringFormatFlags::kSigned) && int64_t(i) < 0) {
i = uint64_t(-int64_t(i));
sign = '-';
}
else if ((flags & kFormatShowSign) != 0) {
else if (Support::test(flags, StringFormatFlags::kShowSign)) {
sign = '+';
}
else if ((flags & kFormatShowSpace) != 0) {
else if (Support::test(flags, StringFormatFlags::kShowSpace)) {
sign = ' ';
}
// --------------------------------------------------------------------------
// [Number]
// --------------------------------------------------------------------------
// Format Number
// -------------
switch (base) {
case 2:
@@ -320,11 +303,10 @@ Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, ui
size_t numberSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p);
// --------------------------------------------------------------------------
// [Alternate Form]
// --------------------------------------------------------------------------
// Alternate Form
// --------------
if ((flags & kFormatAlternate) != 0) {
if (Support::test(flags, StringFormatFlags::kAlternate)) {
if (base == 8) {
if (orig != 0)
*--p = '0';
@@ -335,9 +317,8 @@ Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, ui
}
}
// --------------------------------------------------------------------------
// [Width]
// --------------------------------------------------------------------------
// String Width
// ------------
if (sign != 0)
*--p = sign;
@@ -350,9 +331,8 @@ Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, ui
else
width -= numberSize;
// --------------------------------------------------------------------------
// [Write]
// --------------------------------------------------------------------------
// Finalize
// --------
size_t prefixSize = (size_t)(buf + ASMJIT_ARRAY_SIZE(buf) - p) - numberSize;
char* data = prepare(op, prefixSize + width + numberSize);
@@ -370,7 +350,7 @@ Error String::_opNumber(uint32_t op, uint64_t i, uint32_t base, size_t width, ui
return kErrorOk;
}
Error String::_opHex(uint32_t op, const void* data, size_t size, char separator) noexcept {
Error String::_opHex(ModifyOp op, const void* data, size_t size, char separator) noexcept {
char* dst;
const uint8_t* src = static_cast<const uint8_t*>(data);
@@ -414,7 +394,7 @@ Error String::_opHex(uint32_t op, const void* data, size_t size, char separator)
return kErrorOk;
}
Error String::_opFormat(uint32_t op, const char* fmt, ...) noexcept {
Error String::_opFormat(ModifyOp op, const char* fmt, ...) noexcept {
Error err;
va_list ap;
@@ -425,8 +405,8 @@ Error String::_opFormat(uint32_t op, const char* fmt, ...) noexcept {
return err;
}
Error String::_opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept {
size_t startAt = (op == kOpAssign) ? size_t(0) : size();
Error String::_opVFormat(ModifyOp op, const char* fmt, va_list ap) noexcept {
size_t startAt = (op == ModifyOp::kAssign) ? size_t(0) : size();
size_t remainingCapacity = capacity() - startAt;
char buf[1024];
@@ -504,9 +484,8 @@ bool String::eq(const char* other, size_t size) const noexcept {
}
}
// ============================================================================
// [asmjit::Support - Unit]
// ============================================================================
// String - Tests
// ==============
#if defined(ASMJIT_TEST)
UNIT(core_string) {
@@ -566,7 +545,7 @@ UNIT(core_string) {
EXPECT(s.appendUInt(1234) == kErrorOk);
EXPECT(s.eq("1234") == true);
EXPECT(s.assignUInt(0xFFFF, 16, 0, String::kFormatAlternate) == kErrorOk);
EXPECT(s.assignUInt(0xFFFF, 16, 0, StringFormatFlags::kAlternate) == kErrorOk);
EXPECT(s.eq("0xFFFF"));
StringTmp<64> sTmp;
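For orientation, the reworked string API shown in this file can be used like the test above; a minimal usage sketch (error handling omitted, `<asmjit/core.h>` assumed as the umbrella include):

```cpp
#include <asmjit/core.h>
using namespace asmjit;

void stringExample() {
  String s;
  s.assignUInt(0xFFFF, 16, 0, StringFormatFlags::kAlternate); // "0xFFFF" (alternate form adds the 0x prefix)
  s.appendFormat(" / %d", 1234);                              // routed through _opFormat(ModifyOp::kAppend, ...)
  bool ok = s.eq("0xFFFF / 1234");                            // true
  (void)ok;
}
```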

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_STRING_H_INCLUDED
#define ASMJIT_CORE_STRING_H_INCLUDED
@@ -32,20 +14,41 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_utilities
//! \{
// ============================================================================
// [asmjit::FixedString]
// ============================================================================
//! Format flags used by \ref String API.
enum class StringFormatFlags : uint32_t {
//! No flags.
kNone = 0x00000000u,
//! Show sign.
kShowSign = 0x00000001u,
//! Show space.
kShowSpace = 0x00000002u,
//! Alternate form (use 0x when formatting HEX number).
kAlternate = 0x00000004u,
//! The input is signed.
kSigned = 0x80000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(StringFormatFlags)
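Because `ASMJIT_DEFINE_ENUM_FLAGS` is applied, these flags combine with the usual bitwise operators and are tested with `Support::test()`, which is how `String::_opNumber()` uses them in string.cpp above; a tiny sketch:

```cpp
#include <asmjit/core.h>
using namespace asmjit;

// Returns true when the value should be formatted as signed.
static bool isSignedFormat(StringFormatFlags flags) {
  return Support::test(flags, StringFormatFlags::kSigned);
}

// Flags compose with `|`, e.g. StringFormatFlags::kSigned | StringFormatFlags::kShowSign.
```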
//! A fixed string - only useful for strings that would never exceed `N - 1`
//! characters; always null-terminated.
//! Fixed string - only useful for strings that would never exceed `N - 1` characters; always null-terminated.
template<size_t N>
union FixedString {
//! \name Constants
//! \{
// This cannot be constexpr as GCC 4.8 refuses constexpr members of unions.
enum : uint32_t {
kNumU32 = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t))
kNumUInt32Words = uint32_t((N + sizeof(uint32_t) - 1) / sizeof(uint32_t))
};
char str[kNumU32 * sizeof(uint32_t)];
uint32_t u32[kNumU32];
//! \}
//! \name Members
//! \{
char str[kNumUInt32Words * sizeof(uint32_t)];
uint32_t u32[kNumUInt32Words];
//! \}
//! \name Utilities
//! \{
@@ -56,46 +59,31 @@ union FixedString {
//! \}
};
// ============================================================================
// [asmjit::String]
// ============================================================================
//! A simple non-reference counted string that uses small string optimization (SSO).
//!
//! This string has 3 allocation possibilities:
//!
//! 1. Small - embedded buffer is used for up to `kSSOCapacity` characters.
//! This should handle most small strings and thus avoid dynamic
//! memory allocation for most use-cases.
//! 1. Small - embedded buffer is used for up to `kSSOCapacity` characters. This should handle most small
//! strings and thus avoid dynamic memory allocation for most use-cases.
//!
//! 2. Large - string that doesn't fit into an embedded buffer (or string
//! that was truncated from a larger buffer) and is owned by
//! AsmJit. When you destroy the string AsmJit would automatically
//! 2. Large - string that doesn't fit into an embedded buffer (or string that was truncated from a larger
//! buffer) and is owned by AsmJit. When you destroy the string AsmJit would automatically
//! release the large buffer.
//!
//! 3. External - like Large (2), however, the large buffer is not owned by
//! AsmJit and won't be released when the string is destroyed
//! or reallocated. This is mostly useful for working with
//! larger temporary strings allocated on stack or with immutable
//! strings.
//! 3. External - like Large (2), however, the large buffer is not owned by AsmJit and won't be released when
//! the string is destroyed or reallocated. This is mostly useful for working with larger temporary
//! strings allocated on stack or with immutable strings.
class String {
public:
ASMJIT_NONCOPYABLE(String)
//! String operation.
enum Op : uint32_t {
enum class ModifyOp : uint32_t {
//! Assignment - a new content replaces the current one.
kOpAssign = 0,
kAssign = 0,
//! Append - a new content is appended to the string.
kOpAppend = 1
};
//! String format flags.
enum FormatFlags : uint32_t {
kFormatShowSign = 0x00000001u,
kFormatShowSpace = 0x00000002u,
kFormatAlternate = 0x00000004u,
kFormatSigned = 0x80000000u
kAppend = 1
};
//! \cond INTERNAL
@@ -106,8 +94,10 @@ public:
//! String type.
enum Type : uint8_t {
kTypeLarge = 0x1Fu, //!< Large string (owned by String).
kTypeExternal = 0x20u //!< External string (zone allocated or not owned by String).
//! Large string (owned by String).
kTypeLarge = 0x1Fu,
//! External string (zone allocated or not owned by String).
kTypeExternal = 0x20u
};
union Raw {
@@ -213,15 +203,15 @@ public:
//! Clears the content of the string.
ASMJIT_API Error clear() noexcept;
ASMJIT_API char* prepare(uint32_t op, size_t size) noexcept;
ASMJIT_API char* prepare(ModifyOp op, size_t size) noexcept;
ASMJIT_API Error _opString(uint32_t op, const char* str, size_t size = SIZE_MAX) noexcept;
ASMJIT_API Error _opChar(uint32_t op, char c) noexcept;
ASMJIT_API Error _opChars(uint32_t op, char c, size_t n) noexcept;
ASMJIT_API Error _opNumber(uint32_t op, uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept;
ASMJIT_API Error _opHex(uint32_t op, const void* data, size_t size, char separator = '\0') noexcept;
ASMJIT_API Error _opFormat(uint32_t op, const char* fmt, ...) noexcept;
ASMJIT_API Error _opVFormat(uint32_t op, const char* fmt, va_list ap) noexcept;
ASMJIT_API Error _opString(ModifyOp op, const char* str, size_t size = SIZE_MAX) noexcept;
ASMJIT_API Error _opChar(ModifyOp op, char c) noexcept;
ASMJIT_API Error _opChars(ModifyOp op, char c, size_t n) noexcept;
ASMJIT_API Error _opNumber(ModifyOp op, uint64_t i, uint32_t base = 0, size_t width = 0, StringFormatFlags flags = StringFormatFlags::kNone) noexcept;
ASMJIT_API Error _opHex(ModifyOp op, const void* data, size_t size, char separator = '\0') noexcept;
ASMJIT_API Error _opFormat(ModifyOp op, const char* fmt, ...) noexcept;
ASMJIT_API Error _opVFormat(ModifyOp op, const char* fmt, va_list ap) noexcept;
//! Replaces the current content of the string with `data` of the given `size`.
//!
@@ -235,45 +225,45 @@ public:
//! Replaces the current content of the string by a single `c` character.
inline Error assign(char c) noexcept {
return _opChar(kOpAssign, c);
return _opChar(ModifyOp::kAssign, c);
}
//! Replaces the current content of the string by a `c` character, repeated `n` times.
inline Error assignChars(char c, size_t n) noexcept {
return _opChars(kOpAssign, c, n);
return _opChars(ModifyOp::kAssign, c, n);
}
//! Replaces the current content of the string by a formatted integer `i` (signed).
inline Error assignInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAssign, uint64_t(i), base, width, flags | kFormatSigned);
inline Error assignInt(int64_t i, uint32_t base = 0, size_t width = 0, StringFormatFlags flags = StringFormatFlags::kNone) noexcept {
return _opNumber(ModifyOp::kAssign, uint64_t(i), base, width, flags | StringFormatFlags::kSigned);
}
//! Replaces the current content of the string by a formatted integer `i` (unsigned).
inline Error assignUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAssign, i, base, width, flags);
inline Error assignUInt(uint64_t i, uint32_t base = 0, size_t width = 0, StringFormatFlags flags = StringFormatFlags::kNone) noexcept {
return _opNumber(ModifyOp::kAssign, i, base, width, flags);
}
//! Replaces the current content of the string by the given `data` converted to a HEX string.
inline Error assignHex(const void* data, size_t size, char separator = '\0') noexcept {
return _opHex(kOpAssign, data, size, separator);
return _opHex(ModifyOp::kAssign, data, size, separator);
}
//! Replaces the current content of the string by a formatted string `fmt`.
template<typename... Args>
inline Error assignFormat(const char* fmt, Args&&... args) noexcept {
return _opFormat(kOpAssign, fmt, std::forward<Args>(args)...);
return _opFormat(ModifyOp::kAssign, fmt, std::forward<Args>(args)...);
}
//! Replaces the current content of the string by a formatted string `fmt` (va_list version).
inline Error assignVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kOpAssign, fmt, ap);
return _opVFormat(ModifyOp::kAssign, fmt, ap);
}
//! Appends `str` of the given `size` to the string.
//!
//! For null-terminated strings, `size` can be set to `SIZE_MAX`.
inline Error append(const char* str, size_t size = SIZE_MAX) noexcept {
return _opString(kOpAppend, str, size);
return _opString(ModifyOp::kAppend, str, size);
}
//! Appends `other` string to this string.
@@ -283,38 +273,38 @@ public:
//! Appends a single `c` character.
inline Error append(char c) noexcept {
return _opChar(kOpAppend, c);
return _opChar(ModifyOp::kAppend, c);
}
//! Appends `c` character repeated `n` times.
inline Error appendChars(char c, size_t n) noexcept {
return _opChars(kOpAppend, c, n);
return _opChars(ModifyOp::kAppend, c, n);
}
//! Appends a formatted integer `i` (signed).
inline Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAppend, uint64_t(i), base, width, flags | kFormatSigned);
inline Error appendInt(int64_t i, uint32_t base = 0, size_t width = 0, StringFormatFlags flags = StringFormatFlags::kNone) noexcept {
return _opNumber(ModifyOp::kAppend, uint64_t(i), base, width, flags | StringFormatFlags::kSigned);
}
//! Appends a formatted integer `i` (unsigned).
inline Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, uint32_t flags = 0) noexcept {
return _opNumber(kOpAppend, i, base, width, flags);
inline Error appendUInt(uint64_t i, uint32_t base = 0, size_t width = 0, StringFormatFlags flags = StringFormatFlags::kNone) noexcept {
return _opNumber(ModifyOp::kAppend, i, base, width, flags);
}
//! Appends the given `data` converted to a HEX string.
inline Error appendHex(const void* data, size_t size, char separator = '\0') noexcept {
return _opHex(kOpAppend, data, size, separator);
return _opHex(ModifyOp::kAppend, data, size, separator);
}
//! Appends a formatted string `fmt` with `args`.
template<typename... Args>
inline Error appendFormat(const char* fmt, Args&&... args) noexcept {
return _opFormat(kOpAppend, fmt, std::forward<Args>(args)...);
return _opFormat(ModifyOp::kAppend, fmt, std::forward<Args>(args)...);
}
//! Appends a formatted string `fmt` (va_list version).
inline Error appendVFormat(const char* fmt, va_list ap) noexcept {
return _opVFormat(kOpAppend, fmt, ap);
return _opVFormat(ModifyOp::kAppend, fmt, ap);
}
ASMJIT_API Error padEnd(size_t n, char c = ' ') noexcept;
@@ -332,8 +322,8 @@ public:
//! Resets the string to embedded storage and makes it empty (zero length, zero first char).
//!
//! \note This is always called internally after an external buffer was released
//! as it zeroes all bytes used by String's embedded storage.
//! \note This is always called internally after an external buffer was released as it zeroes all bytes
//! used by String's embedded storage.
inline void _resetInternal() noexcept {
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_raw.uptr); i++)
_raw.uptr[i] = 0;
@@ -347,26 +337,8 @@ public:
}
//! \}
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("Use assign() instead of assignString()")
inline Error assignString(const char* data, size_t size = SIZE_MAX) noexcept { return assign(data, size); }
ASMJIT_DEPRECATED("Use assign() instead of assignChar()")
inline Error assignChar(char c) noexcept { return assign(c); }
ASMJIT_DEPRECATED("Use append() instead of appendString()")
inline Error appendString(const char* data, size_t size = SIZE_MAX) noexcept { return append(data, size); }
ASMJIT_DEPRECATED("Use append() instead of appendChar()")
inline Error appendChar(char c) noexcept { return append(c); }
#endif // !ASMJIT_NO_DEPRECATED
};
// ============================================================================
// [asmjit::StringTmp]
// ============================================================================
//! Temporary string builder, has statically allocated `N` bytes.
template<size_t N>
class StringTmp : public String {

View File

@@ -1,34 +1,15 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Support - Unit]
// ============================================================================
// Support - Tests
// ===============
#if defined(ASMJIT_TEST)
template<typename T>
@@ -85,10 +66,14 @@ static void testBitUtils() noexcept {
for (i = 0; i < 63; i++) EXPECT(Support::blsi(uint64_t(3) << i) == uint64_t(1) << i);
INFO("Support::ctz()");
for (i = 0; i < 32; i++) EXPECT(Support::Internal::clzFallback(uint32_t(1) << i) == 31 - i);
for (i = 0; i < 64; i++) EXPECT(Support::Internal::clzFallback(uint64_t(1) << i) == 63 - i);
for (i = 0; i < 32; i++) EXPECT(Support::Internal::ctzFallback(uint32_t(1) << i) == i);
for (i = 0; i < 64; i++) EXPECT(Support::Internal::ctzFallback(uint64_t(1) << i) == i);
for (i = 0; i < 32; i++) EXPECT(Support::clz(uint32_t(1) << i) == 31 - i);
for (i = 0; i < 64; i++) EXPECT(Support::clz(uint64_t(1) << i) == 63 - i);
for (i = 0; i < 32; i++) EXPECT(Support::ctz(uint32_t(1) << i) == i);
for (i = 0; i < 64; i++) EXPECT(Support::ctz(uint64_t(1) << i) == i);
for (i = 0; i < 32; i++) EXPECT(Support::constCtz(uint32_t(1) << i) == i);
for (i = 0; i < 64; i++) EXPECT(Support::constCtz(uint64_t(1) << i) == i);
INFO("Support::bitMask()");
EXPECT(Support::bitMask(0, 1, 7) == 0x83u);

File diff suppressed because it is too large

View File

@@ -1,37 +1,14 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/target.h"
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Target - Construction / Destruction]
// ============================================================================
Target::Target() noexcept
: _environment() {}
Target::Target() noexcept : _environment() {}
Target::~Target() noexcept {}
ASMJIT_END_NAMESPACE

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_TARGET_H_INCLUDED
#define ASMJIT_CORE_TARGET_H_INCLUDED
@@ -32,98 +14,6 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::CodeInfo]
// ============================================================================
#ifndef ASMJIT_NO_DEPRECATED
//! Basic information about a code (or target). It describes its architecture,
//! code generation mode (or optimization level), and base address.
class ASMJIT_DEPRECATED_STRUCT("Use Environment instead of CodeInfo") CodeInfo {
public:
//!< Environment information.
Environment _environment;
//! Base address.
uint64_t _baseAddress;
//! \name Construction & Destruction
//! \{
inline CodeInfo() noexcept
: _environment(),
_baseAddress(Globals::kNoBaseAddress) {}
inline explicit CodeInfo(uint32_t arch, uint32_t subArch = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
: _environment(arch, subArch),
_baseAddress(baseAddress) {}
inline explicit CodeInfo(const Environment& environment, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept
: _environment(environment),
_baseAddress(baseAddress) {}
inline CodeInfo(const CodeInfo& other) noexcept { init(other); }
inline bool isInitialized() const noexcept {
return _environment.arch() != Environment::kArchUnknown;
}
inline void init(const CodeInfo& other) noexcept {
*this = other;
}
inline void init(uint32_t arch, uint32_t subArch = 0, uint64_t baseAddress = Globals::kNoBaseAddress) noexcept {
_environment.init(arch, subArch);
_baseAddress = baseAddress;
}
inline void reset() noexcept {
_environment.reset();
_baseAddress = Globals::kNoBaseAddress;
}
//! \}
//! \name Overloaded Operators
//! \{
inline CodeInfo& operator=(const CodeInfo& other) noexcept = default;
inline bool operator==(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) == 0; }
inline bool operator!=(const CodeInfo& other) const noexcept { return ::memcmp(this, &other, sizeof(*this)) != 0; }
//! \}
//! \name Accessors
//! \{
//! Returns the target environment information, see \ref Environment.
inline const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture, see \ref Environment::Arch.
inline uint32_t arch() const noexcept { return _environment.arch(); }
//! Returns the target sub-architecture, see \ref Environment::SubArch.
inline uint32_t subArch() const noexcept { return _environment.subArch(); }
//! Returns the native size of the target's architecture GP register.
inline uint32_t gpSize() const noexcept { return _environment.registerSize(); }
//! Tests whether this CodeInfo has a base address set.
inline bool hasBaseAddress() const noexcept { return _baseAddress != Globals::kNoBaseAddress; }
//! Returns the base address or \ref Globals::kNoBaseAddress if it's not set.
inline uint64_t baseAddress() const noexcept { return _baseAddress; }
//! Sets base address to `p`.
inline void setBaseAddress(uint64_t p) noexcept { _baseAddress = p; }
//! Resets base address (implicitly sets it to \ref Globals::kNoBaseAddress).
inline void resetBaseAddress() noexcept { _baseAddress = Globals::kNoBaseAddress; }
//! \}
};
#endif // !ASMJIT_NO_DEPRECATED
// ============================================================================
// [asmjit::Target]
// ============================================================================
//! Target is an abstract class that describes a machine code target.
class ASMJIT_VIRTAPI Target {
public:
@@ -146,24 +36,12 @@ public:
//! \name Accessors
//! \{
//! Returns CodeInfo of this target.
//!
//! CodeInfo can be used to setup a CodeHolder in case you plan to generate a
//! code compatible and executable by this Runtime.
//! Returns target's environment.
inline const Environment& environment() const noexcept { return _environment; }
//! Returns the target architecture, see \ref Environment::Arch.
inline uint32_t arch() const noexcept { return _environment.arch(); }
//! Returns the target sub-architecture, see \ref Environment::SubArch.
inline uint32_t subArch() const noexcept { return _environment.subArch(); }
#ifndef ASMJIT_NO_DEPRECATED
ASMJIT_DEPRECATED("Use environment() instead")
inline CodeInfo codeInfo() const noexcept { return CodeInfo(_environment); }
ASMJIT_DEPRECATED("Use environment().format() instead")
inline uint32_t targetType() const noexcept { return _environment.format(); }
#endif // !ASMJIT_NO_DEPRECATED
//! Returns the target architecture.
inline Arch arch() const noexcept { return _environment.arch(); }
//! Returns the target sub-architecture.
inline SubArch subArch() const noexcept { return _environment.subArch(); }
//! \}
};
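With the strong-typed accessors, target query code becomes, for example (a sketch using only the members shown above):

```cpp
#include <asmjit/core.h>
using namespace asmjit;

void inspectTarget(const Target& target) {
  const Environment& env = target.environment();
  Arch arch = target.arch();          // strong-typed Arch instead of uint32_t
  SubArch subArch = target.subArch(); // strong-typed SubArch
  (void)env; (void)arch; (void)subArch;
}
```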

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/misc_p.h"
@@ -27,58 +9,58 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Type]
// ============================================================================
namespace TypeUtils {
namespace Type {
template<uint32_t TYPE_ID>
struct BaseOfTypeId {
static constexpr uint32_t kTypeId =
isBase (TYPE_ID) ? TYPE_ID :
isMask8 (TYPE_ID) ? kIdU8 :
isMask16(TYPE_ID) ? kIdU16 :
isMask32(TYPE_ID) ? kIdU32 :
isMask64(TYPE_ID) ? kIdU64 :
isMmx32 (TYPE_ID) ? kIdI32 :
isMmx64 (TYPE_ID) ? kIdI64 :
isVec32 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec32Start :
isVec64 (TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec64Start :
isVec128(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec128Start :
isVec256(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec256Start :
isVec512(TYPE_ID) ? TYPE_ID + kIdI8 - _kIdVec512Start : 0;
template<uint32_t Index>
struct ScalarOfTypeId {
enum : uint32_t {
kTypeId = uint32_t(
isScalar(TypeId(Index)) ? TypeId(Index) :
isMask8 (TypeId(Index)) ? TypeId::kUInt8 :
isMask16(TypeId(Index)) ? TypeId::kUInt16 :
isMask32(TypeId(Index)) ? TypeId::kUInt32 :
isMask64(TypeId(Index)) ? TypeId::kUInt64 :
isMmx32 (TypeId(Index)) ? TypeId::kUInt32 :
isMmx64 (TypeId(Index)) ? TypeId::kUInt64 :
isVec32 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec32Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec64 (TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec64Start ) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec128(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec128Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec256(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec256Start) + uint32_t(TypeId::kInt8)) & 0xFF) :
isVec512(TypeId(Index)) ? TypeId((Index - uint32_t(TypeId::_kVec512Start) + uint32_t(TypeId::kInt8)) & 0xFF) : TypeId::kVoid)
};
};
template<uint32_t TYPE_ID>
template<uint32_t Index>
struct SizeOfTypeId {
static constexpr uint32_t kTypeSize =
isInt8 (TYPE_ID) ? 1 :
isUInt8 (TYPE_ID) ? 1 :
isInt16 (TYPE_ID) ? 2 :
isUInt16 (TYPE_ID) ? 2 :
isInt32 (TYPE_ID) ? 4 :
isUInt32 (TYPE_ID) ? 4 :
isInt64 (TYPE_ID) ? 8 :
isUInt64 (TYPE_ID) ? 8 :
isFloat32(TYPE_ID) ? 4 :
isFloat64(TYPE_ID) ? 8 :
isFloat80(TYPE_ID) ? 10 :
isMask8 (TYPE_ID) ? 1 :
isMask16 (TYPE_ID) ? 2 :
isMask32 (TYPE_ID) ? 4 :
isMask64 (TYPE_ID) ? 8 :
isMmx32 (TYPE_ID) ? 4 :
isMmx64 (TYPE_ID) ? 8 :
isVec32 (TYPE_ID) ? 4 :
isVec64 (TYPE_ID) ? 8 :
isVec128 (TYPE_ID) ? 16 :
isVec256 (TYPE_ID) ? 32 :
isVec512 (TYPE_ID) ? 64 : 0;
enum : uint32_t {
kTypeSize =
isInt8 (TypeId(Index)) ? 1 :
isUInt8 (TypeId(Index)) ? 1 :
isInt16 (TypeId(Index)) ? 2 :
isUInt16 (TypeId(Index)) ? 2 :
isInt32 (TypeId(Index)) ? 4 :
isUInt32 (TypeId(Index)) ? 4 :
isInt64 (TypeId(Index)) ? 8 :
isUInt64 (TypeId(Index)) ? 8 :
isFloat32(TypeId(Index)) ? 4 :
isFloat64(TypeId(Index)) ? 8 :
isFloat80(TypeId(Index)) ? 10 :
isMask8 (TypeId(Index)) ? 1 :
isMask16 (TypeId(Index)) ? 2 :
isMask32 (TypeId(Index)) ? 4 :
isMask64 (TypeId(Index)) ? 8 :
isMmx32 (TypeId(Index)) ? 4 :
isMmx64 (TypeId(Index)) ? 8 :
isVec32 (TypeId(Index)) ? 4 :
isVec64 (TypeId(Index)) ? 8 :
isVec128 (TypeId(Index)) ? 16 :
isVec256 (TypeId(Index)) ? 32 :
isVec512 (TypeId(Index)) ? 64 : 0
};
};
const TypeData _typeData = {
#define VALUE(x) BaseOfTypeId<x>::kTypeId
#define VALUE(x) TypeId(ScalarOfTypeId<x>::kTypeId)
{ ASMJIT_LOOKUP_TABLE_256(VALUE, 0) },
#undef VALUE
@@ -87,6 +69,6 @@ const TypeData _typeData = {
#undef VALUE
};
} // {Type}
} // {TypeUtils}
ASMJIT_END_NAMESPACE
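The tables built here back `TypeUtils::scalarOf()` and `TypeUtils::sizeOf()` declared in type.h; for example, following the `ScalarOfTypeId` / `SizeOfTypeId` templates above:

```cpp
#include <asmjit/core.h>
using namespace asmjit;

void typeDataExample() {
  // kFloat32x4 is a 128-bit vector whose scalar element type is kFloat32.
  TypeId scalar = TypeUtils::scalarOf(TypeId::kFloat32x4); // TypeId::kFloat32
  uint32_t size = TypeUtils::sizeOf(TypeId::kFloat32x4);   // 16
  (void)scalar; (void)size;
}
```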

View File

@@ -1,210 +1,237 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_TYPE_H_INCLUDED
#define ASMJIT_CORE_TYPE_H_INCLUDED
#include "../core/globals.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
// ============================================================================
// [asmjit::Type]
// ============================================================================
//! Provides a minimalist type-system that is used by Asmjit library.
namespace Type {
//! TypeId.
//! Type identifier provides a minimalist type system used across AsmJit library.
//!
//! This is an additional information that can be used to describe a value-type
//! of physical or virtual register. it's used mostly by BaseCompiler to describe
//! register representation (the group of data stored in the register and the
//! width used) and it's also used by APIs that allow to describe and work with
//! function signatures.
enum Id : uint32_t {
kIdVoid = 0, //!< Void type.
//! This is additional information that can be used to describe a value type of a physical or virtual register. It's
//! used mostly by BaseCompiler to describe register representation (the group of data stored in the register and the
//! width used), and it's also used by APIs that allow describing and working with function signatures.
enum class TypeId : uint8_t {
//! Void type.
kVoid = 0,
_kIdBaseStart = 32,
_kIdBaseEnd = 44,
_kBaseStart = 32,
_kBaseEnd = 44,
_kIdIntStart = 32,
_kIdIntEnd = 41,
_kIntStart = 32,
_kIntEnd = 41,
kIdIntPtr = 32, //!< Abstract signed integer type that has a native size.
kIdUIntPtr = 33, //!< Abstract unsigned integer type that has a native size.
//! Abstract signed integer type that has a native size.
kIntPtr = 32,
//! Abstract unsigned integer type that has a native size.
kUIntPtr = 33,
kIdI8 = 34, //!< 8-bit signed integer type.
kIdU8 = 35, //!< 8-bit unsigned integer type.
kIdI16 = 36, //!< 16-bit signed integer type.
kIdU16 = 37, //!< 16-bit unsigned integer type.
kIdI32 = 38, //!< 32-bit signed integer type.
kIdU32 = 39, //!< 32-bit unsigned integer type.
kIdI64 = 40, //!< 64-bit signed integer type.
kIdU64 = 41, //!< 64-bit unsigned integer type.
//! 8-bit signed integer type.
kInt8 = 34,
//! 8-bit unsigned integer type.
kUInt8 = 35,
//! 16-bit signed integer type.
kInt16 = 36,
//! 16-bit unsigned integer type.
kUInt16 = 37,
//! 32-bit signed integer type.
kInt32 = 38,
//! 32-bit unsigned integer type.
kUInt32 = 39,
//! 64-bit signed integer type.
kInt64 = 40,
//! 64-bit unsigned integer type.
kUInt64 = 41,
_kIdFloatStart = 42,
_kIdFloatEnd = 44,
_kFloatStart = 42,
_kFloatEnd = 44,
kIdF32 = 42, //!< 32-bit floating point type.
kIdF64 = 43, //!< 64-bit floating point type.
kIdF80 = 44, //!< 80-bit floating point type.
//! 32-bit floating point type.
kFloat32 = 42,
//! 64-bit floating point type.
kFloat64 = 43,
//! 80-bit floating point type.
kFloat80 = 44,
_kIdMaskStart = 45,
_kIdMaskEnd = 48,
_kMaskStart = 45,
_kMaskEnd = 48,
kIdMask8 = 45, //!< 8-bit opmask register (K).
kIdMask16 = 46, //!< 16-bit opmask register (K).
kIdMask32 = 47, //!< 32-bit opmask register (K).
kIdMask64 = 48, //!< 64-bit opmask register (K).
//! 8-bit opmask register (K).
kMask8 = 45,
//! 16-bit opmask register (K).
kMask16 = 46,
//! 32-bit opmask register (K).
kMask32 = 47,
//! 64-bit opmask register (K).
kMask64 = 48,
_kIdMmxStart = 49,
_kIdMmxEnd = 50,
_kMmxStart = 49,
_kMmxEnd = 50,
kIdMmx32 = 49, //!< 64-bit MMX register only used for 32 bits.
kIdMmx64 = 50, //!< 64-bit MMX register.
//! 64-bit MMX register only used for 32 bits.
kMmx32 = 49,
//! 64-bit MMX register.
kMmx64 = 50,
_kIdVec32Start = 51,
_kIdVec32End = 60,
_kVec32Start = 51,
_kVec32End = 60,
kIdI8x4 = 51,
kIdU8x4 = 52,
kIdI16x2 = 53,
kIdU16x2 = 54,
kIdI32x1 = 55,
kIdU32x1 = 56,
kIdF32x1 = 59,
kInt8x4 = 51,
kUInt8x4 = 52,
kInt16x2 = 53,
kUInt16x2 = 54,
kInt32x1 = 55,
kUInt32x1 = 56,
kFloat32x1 = 59,
_kIdVec64Start = 61,
_kIdVec64End = 70,
_kVec64Start = 61,
_kVec64End = 70,
kIdI8x8 = 61,
kIdU8x8 = 62,
kIdI16x4 = 63,
kIdU16x4 = 64,
kIdI32x2 = 65,
kIdU32x2 = 66,
kIdI64x1 = 67,
kIdU64x1 = 68,
kIdF32x2 = 69,
kIdF64x1 = 70,
kInt8x8 = 61,
kUInt8x8 = 62,
kInt16x4 = 63,
kUInt16x4 = 64,
kInt32x2 = 65,
kUInt32x2 = 66,
kInt64x1 = 67,
kUInt64x1 = 68,
kFloat32x2 = 69,
kFloat64x1 = 70,
_kIdVec128Start = 71,
_kIdVec128End = 80,
_kVec128Start = 71,
_kVec128End = 80,
kIdI8x16 = 71,
kIdU8x16 = 72,
kIdI16x8 = 73,
kIdU16x8 = 74,
kIdI32x4 = 75,
kIdU32x4 = 76,
kIdI64x2 = 77,
kIdU64x2 = 78,
kIdF32x4 = 79,
kIdF64x2 = 80,
kInt8x16 = 71,
kUInt8x16 = 72,
kInt16x8 = 73,
kUInt16x8 = 74,
kInt32x4 = 75,
kUInt32x4 = 76,
kInt64x2 = 77,
kUInt64x2 = 78,
kFloat32x4 = 79,
kFloat64x2 = 80,
_kIdVec256Start = 81,
_kIdVec256End = 90,
_kVec256Start = 81,
_kVec256End = 90,
kIdI8x32 = 81,
kIdU8x32 = 82,
kIdI16x16 = 83,
kIdU16x16 = 84,
kIdI32x8 = 85,
kIdU32x8 = 86,
kIdI64x4 = 87,
kIdU64x4 = 88,
kIdF32x8 = 89,
kIdF64x4 = 90,
kInt8x32 = 81,
kUInt8x32 = 82,
kInt16x16 = 83,
kUInt16x16 = 84,
kInt32x8 = 85,
kUInt32x8 = 86,
kInt64x4 = 87,
kUInt64x4 = 88,
kFloat32x8 = 89,
kFloat64x4 = 90,
_kIdVec512Start = 91,
_kIdVec512End = 100,
_kVec512Start = 91,
_kVec512End = 100,
kIdI8x64 = 91,
kIdU8x64 = 92,
kIdI16x32 = 93,
kIdU16x32 = 94,
kIdI32x16 = 95,
kIdU32x16 = 96,
kIdI64x8 = 97,
kIdU64x8 = 98,
kIdF32x16 = 99,
kIdF64x8 = 100,
kInt8x64 = 91,
kUInt8x64 = 92,
kInt16x32 = 93,
kUInt16x32 = 94,
kInt32x16 = 95,
kUInt32x16 = 96,
kInt64x8 = 97,
kUInt64x8 = 98,
kFloat32x16 = 99,
kFloat64x8 = 100,
kIdCount = 101,
kIdMax = 255
kLastAssigned = kFloat64x8,
kMaxValue = 255
};
ASMJIT_DEFINE_ENUM_COMPARE(TypeId)
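A quick illustration of the strong-typed `TypeId` in use (the predicates come from the `TypeUtils` namespace declared below; ordered comparison of `TypeId` values is assumed to be what `ASMJIT_DEFINE_ENUM_COMPARE` provides):

```cpp
#include <asmjit/core.h>
using namespace asmjit;

void typeIdExample() {
  bool isIntType = TypeUtils::isInt(TypeId::kUInt32);       // true
  bool isVec128  = TypeUtils::isVec128(TypeId::kFloat32x4); // true
  bool ordered   = TypeId::kInt8 < TypeId::kUInt64;         // assumed via ASMJIT_DEFINE_ENUM_COMPARE
  (void)isIntType; (void)isVec128; (void)ordered;
}
```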
//! Type identifier utilities.
namespace TypeUtils {
struct TypeData {
uint8_t baseOf[kIdMax + 1];
uint8_t sizeOf[kIdMax + 1];
TypeId scalarOf[uint32_t(TypeId::kMaxValue) + 1];
uint8_t sizeOf[uint32_t(TypeId::kMaxValue) + 1];
};
ASMJIT_VARAPI const TypeData _typeData;
static constexpr bool isVoid(uint32_t typeId) noexcept { return typeId == 0; }
static constexpr bool isValid(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdVec512End; }
static constexpr bool isBase(uint32_t typeId) noexcept { return typeId >= _kIdBaseStart && typeId <= _kIdBaseEnd; }
static constexpr bool isAbstract(uint32_t typeId) noexcept { return typeId >= kIdIntPtr && typeId <= kIdUIntPtr; }
//! Returns the scalar type of `typeId`.
static inline TypeId scalarOf(TypeId typeId) noexcept { return _typeData.scalarOf[uint32_t(typeId)]; }
static constexpr bool isInt(uint32_t typeId) noexcept { return typeId >= _kIdIntStart && typeId <= _kIdIntEnd; }
static constexpr bool isInt8(uint32_t typeId) noexcept { return typeId == kIdI8; }
static constexpr bool isUInt8(uint32_t typeId) noexcept { return typeId == kIdU8; }
static constexpr bool isInt16(uint32_t typeId) noexcept { return typeId == kIdI16; }
static constexpr bool isUInt16(uint32_t typeId) noexcept { return typeId == kIdU16; }
static constexpr bool isInt32(uint32_t typeId) noexcept { return typeId == kIdI32; }
static constexpr bool isUInt32(uint32_t typeId) noexcept { return typeId == kIdU32; }
static constexpr bool isInt64(uint32_t typeId) noexcept { return typeId == kIdI64; }
static constexpr bool isUInt64(uint32_t typeId) noexcept { return typeId == kIdU64; }
//! Returns the size [in bytes] of `typeId`.
static inline uint32_t sizeOf(TypeId typeId) noexcept { return _typeData.sizeOf[uint32_t(typeId)]; }
static constexpr bool isGp8(uint32_t typeId) noexcept { return typeId >= kIdI8 && typeId <= kIdU8; }
static constexpr bool isGp16(uint32_t typeId) noexcept { return typeId >= kIdI16 && typeId <= kIdU16; }
static constexpr bool isGp32(uint32_t typeId) noexcept { return typeId >= kIdI32 && typeId <= kIdU32; }
static constexpr bool isGp64(uint32_t typeId) noexcept { return typeId >= kIdI64 && typeId <= kIdU64; }
//! Tests whether a given type `typeId` is between `a` and `b`.
static inline constexpr bool isBetween(TypeId typeId, TypeId a, TypeId b) noexcept {
return Support::isBetween(uint32_t(typeId), uint32_t(a), uint32_t(b));
}
static constexpr bool isFloat(uint32_t typeId) noexcept { return typeId >= _kIdFloatStart && typeId <= _kIdFloatEnd; }
static constexpr bool isFloat32(uint32_t typeId) noexcept { return typeId == kIdF32; }
static constexpr bool isFloat64(uint32_t typeId) noexcept { return typeId == kIdF64; }
static constexpr bool isFloat80(uint32_t typeId) noexcept { return typeId == kIdF80; }
//! Tests whether a given type `typeId` is \ref TypeId::kVoid.
static inline constexpr bool isVoid(TypeId typeId) noexcept { return typeId == TypeId::kVoid; }
//! Tests whether a given type `typeId` is a valid non-void type.
static inline constexpr bool isValid(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kVec512End); }
//! Tests whether a given type `typeId` is scalar (has no vector part).
static inline constexpr bool isScalar(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kBaseStart, TypeId::_kBaseEnd); }
//! Tests whether a given type `typeId` is abstract, which means that its size depends on register size.
static inline constexpr bool isAbstract(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kIntPtr, TypeId::kUIntPtr); }
static constexpr bool isMask(uint32_t typeId) noexcept { return typeId >= _kIdMaskStart && typeId <= _kIdMaskEnd; }
static constexpr bool isMask8(uint32_t typeId) noexcept { return typeId == kIdMask8; }
static constexpr bool isMask16(uint32_t typeId) noexcept { return typeId == kIdMask16; }
static constexpr bool isMask32(uint32_t typeId) noexcept { return typeId == kIdMask32; }
static constexpr bool isMask64(uint32_t typeId) noexcept { return typeId == kIdMask64; }
//! Tests whether a given type is a scalar integer (signed or unsigned) of any size.
static inline constexpr bool isInt(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kIntStart, TypeId::_kIntEnd); }
//! Tests whether a given type is a scalar 8-bit integer (signed).
static inline constexpr bool isInt8(TypeId typeId) noexcept { return typeId == TypeId::kInt8; }
//! Tests whether a given type is a scalar 8-bit integer (unsigned).
static inline constexpr bool isUInt8(TypeId typeId) noexcept { return typeId == TypeId::kUInt8; }
//! Tests whether a given type is a scalar 16-bit integer (signed).
static inline constexpr bool isInt16(TypeId typeId) noexcept { return typeId == TypeId::kInt16; }
//! Tests whether a given type is a scalar 16-bit integer (unsigned).
static inline constexpr bool isUInt16(TypeId typeId) noexcept { return typeId == TypeId::kUInt16; }
//! Tests whether a given type is a scalar 32-bit integer (signed).
static inline constexpr bool isInt32(TypeId typeId) noexcept { return typeId == TypeId::kInt32; }
//! Tests whether a given type is a scalar 32-bit integer (unsigned).
static inline constexpr bool isUInt32(TypeId typeId) noexcept { return typeId == TypeId::kUInt32; }
//! Tests whether a given type is a scalar 64-bit integer (signed).
static inline constexpr bool isInt64(TypeId typeId) noexcept { return typeId == TypeId::kInt64; }
//! Tests whether a given type is a scalar 64-bit integer (unsigned).
static inline constexpr bool isUInt64(TypeId typeId) noexcept { return typeId == TypeId::kUInt64; }
static constexpr bool isMmx(uint32_t typeId) noexcept { return typeId >= _kIdMmxStart && typeId <= _kIdMmxEnd; }
static constexpr bool isMmx32(uint32_t typeId) noexcept { return typeId == kIdMmx32; }
static constexpr bool isMmx64(uint32_t typeId) noexcept { return typeId == kIdMmx64; }
static inline constexpr bool isGp8(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt8, TypeId::kUInt8); }
static inline constexpr bool isGp16(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt16, TypeId::kUInt16); }
static inline constexpr bool isGp32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt32, TypeId::kUInt32); }
static inline constexpr bool isGp64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::kInt64, TypeId::kUInt64); }
static constexpr bool isVec(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec512End; }
static constexpr bool isVec32(uint32_t typeId) noexcept { return typeId >= _kIdVec32Start && typeId <= _kIdVec32End; }
static constexpr bool isVec64(uint32_t typeId) noexcept { return typeId >= _kIdVec64Start && typeId <= _kIdVec64End; }
static constexpr bool isVec128(uint32_t typeId) noexcept { return typeId >= _kIdVec128Start && typeId <= _kIdVec128End; }
static constexpr bool isVec256(uint32_t typeId) noexcept { return typeId >= _kIdVec256Start && typeId <= _kIdVec256End; }
static constexpr bool isVec512(uint32_t typeId) noexcept { return typeId >= _kIdVec512Start && typeId <= _kIdVec512End; }
//! Tests whether a given type is a scalar floating point of any size.
static inline constexpr bool isFloat(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kFloatStart, TypeId::_kFloatEnd); }
//! Tests whether a given type is a scalar 32-bit float.
static inline constexpr bool isFloat32(TypeId typeId) noexcept { return typeId == TypeId::kFloat32; }
//! Tests whether a given type is a scalar 64-bit float.
static inline constexpr bool isFloat64(TypeId typeId) noexcept { return typeId == TypeId::kFloat64; }
//! Tests whether a given type is a scalar 80-bit float.
static inline constexpr bool isFloat80(TypeId typeId) noexcept { return typeId == TypeId::kFloat80; }
static inline constexpr bool isMask(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMaskStart, TypeId::_kMaskEnd); }
static inline constexpr bool isMask8(TypeId typeId) noexcept { return typeId == TypeId::kMask8; }
static inline constexpr bool isMask16(TypeId typeId) noexcept { return typeId == TypeId::kMask16; }
static inline constexpr bool isMask32(TypeId typeId) noexcept { return typeId == TypeId::kMask32; }
static inline constexpr bool isMask64(TypeId typeId) noexcept { return typeId == TypeId::kMask64; }
static inline constexpr bool isMmx(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kMmxStart, TypeId::_kMmxEnd); }
static inline constexpr bool isMmx32(TypeId typeId) noexcept { return typeId == TypeId::kMmx32; }
static inline constexpr bool isMmx64(TypeId typeId) noexcept { return typeId == TypeId::kMmx64; }
static inline constexpr bool isVec(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec512End); }
static inline constexpr bool isVec32(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec32Start, TypeId::_kVec32End); }
static inline constexpr bool isVec64(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec64Start, TypeId::_kVec64End); }
static inline constexpr bool isVec128(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec128Start, TypeId::_kVec128End); }
static inline constexpr bool isVec256(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec256Start, TypeId::_kVec256End); }
static inline constexpr bool isVec512(TypeId typeId) noexcept { return isBetween(typeId, TypeId::_kVec512Start, TypeId::_kVec512End); }
//! \cond
enum TypeCategory : uint32_t {
@@ -215,157 +242,174 @@ enum TypeCategory : uint32_t {
kTypeCategoryFunction = 4
};
template<typename T, uint32_t Category>
struct IdOfT_ByCategory {}; // Fails if not specialized.
template<typename T, TypeCategory kCategory>
struct TypeIdOfT_ByCategory {}; // Fails if not specialized.
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryIntegral> {
struct TypeIdOfT_ByCategory<T, kTypeCategoryIntegral> {
enum : uint32_t {
kTypeId = (sizeof(T) == 1 && std::is_signed<T>::value) ? kIdI8 :
(sizeof(T) == 1 && !std::is_signed<T>::value) ? kIdU8 :
(sizeof(T) == 2 && std::is_signed<T>::value) ? kIdI16 :
(sizeof(T) == 2 && !std::is_signed<T>::value) ? kIdU16 :
(sizeof(T) == 4 && std::is_signed<T>::value) ? kIdI32 :
(sizeof(T) == 4 && !std::is_signed<T>::value) ? kIdU32 :
(sizeof(T) == 8 && std::is_signed<T>::value) ? kIdI64 :
(sizeof(T) == 8 && !std::is_signed<T>::value) ? kIdU64 : kIdVoid
kTypeId = uint32_t(
(sizeof(T) == 1 && std::is_signed<T>::value) ? TypeId::kInt8 :
(sizeof(T) == 1 && !std::is_signed<T>::value) ? TypeId::kUInt8 :
(sizeof(T) == 2 && std::is_signed<T>::value) ? TypeId::kInt16 :
(sizeof(T) == 2 && !std::is_signed<T>::value) ? TypeId::kUInt16 :
(sizeof(T) == 4 && std::is_signed<T>::value) ? TypeId::kInt32 :
(sizeof(T) == 4 && !std::is_signed<T>::value) ? TypeId::kUInt32 :
(sizeof(T) == 8 && std::is_signed<T>::value) ? TypeId::kInt64 :
(sizeof(T) == 8 && !std::is_signed<T>::value) ? TypeId::kUInt64 : TypeId::kVoid)
};
};
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryFloatingPoint> {
struct TypeIdOfT_ByCategory<T, kTypeCategoryFloatingPoint> {
enum : uint32_t {
kTypeId = (sizeof(T) == 4 ) ? kIdF32 :
(sizeof(T) == 8 ) ? kIdF64 :
(sizeof(T) >= 10) ? kIdF80 : kIdVoid
kTypeId = uint32_t(
(sizeof(T) == 4 ) ? TypeId::kFloat32 :
(sizeof(T) == 8 ) ? TypeId::kFloat64 :
(sizeof(T) >= 10) ? TypeId::kFloat80 : TypeId::kVoid)
};
};
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryEnum>
: public IdOfT_ByCategory<typename std::underlying_type<T>::type, kTypeCategoryIntegral> {};
struct TypeIdOfT_ByCategory<T, kTypeCategoryEnum>
: public TypeIdOfT_ByCategory<typename std::underlying_type<T>::type, kTypeCategoryIntegral> {};
template<typename T>
struct IdOfT_ByCategory<T, kTypeCategoryFunction> {
enum: uint32_t { kTypeId = kIdUIntPtr };
struct TypeIdOfT_ByCategory<T, kTypeCategoryFunction> {
enum : uint32_t {
kTypeId = uint32_t(TypeId::kUIntPtr)
};
};
//! \endcond
//! IdOfT<> template allows to get a TypeId from a C++ type `T`.
template<typename T>
struct IdOfT
//! The TypeIdOfT<> template allows getting a TypeId from a C++ type `T`.
#ifdef _DOXYGEN
template<typename T>
struct TypeIdOfT {
//! TypeId of C++ type `T`.
static constexpr uint32_t kTypeId = _TypeIdDeducedAtCompileTime_;
static constexpr TypeId kTypeId = _TypeIdDeducedAtCompileTime_;
};
#else
: public IdOfT_ByCategory<T,
template<typename T>
struct TypeIdOfT
: public TypeIdOfT_ByCategory<T,
std::is_enum<T>::value ? kTypeCategoryEnum :
std::is_integral<T>::value ? kTypeCategoryIntegral :
std::is_floating_point<T>::value ? kTypeCategoryFloatingPoint :
std::is_function<T>::value ? kTypeCategoryFunction : kTypeCategoryUnknown>
std::is_function<T>::value ? kTypeCategoryFunction : kTypeCategoryUnknown> {};
#endif
{};
//! \cond
template<typename T>
struct IdOfT<T*> { enum : uint32_t { kTypeId = kIdUIntPtr }; };
struct TypeIdOfT<T*> {
enum : uint32_t {
kTypeId = uint32_t(TypeId::kUIntPtr)
};
};
template<typename T>
struct IdOfT<T&> { enum : uint32_t { kTypeId = kIdUIntPtr }; };
struct TypeIdOfT<T&> {
enum : uint32_t {
kTypeId = uint32_t(TypeId::kUIntPtr)
};
};
//! \endcond
static inline uint32_t baseOf(uint32_t typeId) noexcept {
ASMJIT_ASSERT(typeId <= kIdMax);
return _typeData.baseOf[typeId];
}
//! Returns a corresponding \ref TypeId of `T` type.
template<typename T>
static inline constexpr TypeId typeIdOfT() noexcept { return TypeId(TypeIdOfT<T>::kTypeId); }
static inline uint32_t sizeOf(uint32_t typeId) noexcept {
ASMJIT_ASSERT(typeId <= kIdMax);
return _typeData.sizeOf[typeId];
}
//! Returns offset needed to convert a `kIntPtr` and `kUIntPtr` TypeId
//! into a type that matches `registerSize` (general-purpose register size).
//! If you find such TypeId it's then only about adding the offset to it.
//! Returns offset needed to convert a `kIntPtr` and `kUIntPtr` TypeId into a type that matches `registerSize`
//! (general-purpose register size). Once you have such a TypeId, the conversion is just a matter of adding the offset to it.
//!
//! For example:
//!
//! ```
//! uint32_t registerSize = '4' or '8';
//! uint32_t deabstractDelta = Type::deabstractDeltaOfSize(registerSize);
//! uint32_t registerSize = /* 4 or 8 */;
//! uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize);
//!
//! uint32_t typeId = 'some type-id';
//! TypeId typeId = 'some type-id';
//!
//! // Normalize some typeId into a non-abstract typeId.
//! if (Type::isAbstract(typeId)) typeId += deabstractDelta;
//! if (TypeUtils::isAbstract(typeId)) typeId += deabstractDelta;
//!
//! // The same, but by using Type::deabstract() function.
//! typeId = Type::deabstract(typeId, deabstractDelta);
//! // The same, but by using TypeUtils::deabstract() function.
//! typeId = TypeUtils::deabstract(typeId, deabstractDelta);
//! ```
static constexpr uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept {
return registerSize >= 8 ? kIdI64 - kIdIntPtr : kIdI32 - kIdIntPtr;
static inline constexpr uint32_t deabstractDeltaOfSize(uint32_t registerSize) noexcept {
return registerSize >= 8 ? uint32_t(TypeId::kInt64) - uint32_t(TypeId::kIntPtr)
: uint32_t(TypeId::kInt32) - uint32_t(TypeId::kIntPtr);
}
static constexpr uint32_t deabstract(uint32_t typeId, uint32_t deabstractDelta) noexcept {
return isAbstract(typeId) ? typeId + deabstractDelta : typeId;
//! Deabstracts a given `typeId` into a native type by using `deabstractDelta`, which was previously
//! calculated by calling \ref deabstractDeltaOfSize() with a target native register size.
static inline constexpr TypeId deabstract(TypeId typeId, uint32_t deabstractDelta) noexcept {
return isAbstract(typeId) ? TypeId(uint32_t(typeId) + deabstractDelta) : typeId;
}
static inline constexpr TypeId scalarToVector(TypeId scalarTypeId, TypeId vecStartId) noexcept {
return TypeId(uint32_t(vecStartId) + uint32_t(scalarTypeId) - uint32_t(TypeId::kInt8));
}
} // {TypeUtils}
//! Provides type identifiers that can be used in templates instead of native types.
namespace Type {
//! bool as C++ type-name.
struct Bool {};
//! int8_t as C++ type-name.
struct I8 {};
struct Int8 {};
//! uint8_t as C++ type-name.
struct U8 {};
struct UInt8 {};
//! int16_t as C++ type-name.
struct I16 {};
struct Int16 {};
//! uint16_t as C++ type-name.
struct U16 {};
struct UInt16 {};
//! int32_t as C++ type-name.
struct I32 {};
struct Int32 {};
//! uint32_t as C++ type-name.
struct U32 {};
struct UInt32 {};
//! int64_t as C++ type-name.
struct I64 {};
struct Int64 {};
//! uint64_t as C++ type-name.
struct U64 {};
struct UInt64 {};
//! intptr_t as C++ type-name.
struct IPtr {};
struct IntPtr {};
//! uintptr_t as C++ type-name.
struct UPtr {};
struct UIntPtr {};
//! float as C++ type-name.
struct F32 {};
struct Float32 {};
//! double as C++ type-name.
struct F64 {};
struct Float64 {};
} // {Type}
// ============================================================================
// [ASMJIT_DEFINE_TYPE_ID]
// ============================================================================
//! \cond
#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
namespace Type { \
template<> \
struct IdOfT<T> { \
enum : uint32_t { kTypeId = TYPE_ID }; \
}; \
#define ASMJIT_DEFINE_TYPE_ID(T, TYPE_ID) \
namespace TypeUtils { \
template<> \
struct TypeIdOfT<T> { \
enum : uint32_t { \
kTypeId = uint32_t(TYPE_ID) \
}; \
}; \
}
ASMJIT_DEFINE_TYPE_ID(void, kIdVoid);
ASMJIT_DEFINE_TYPE_ID(Bool, kIdU8);
ASMJIT_DEFINE_TYPE_ID(I8 , kIdI8);
ASMJIT_DEFINE_TYPE_ID(U8 , kIdU8);
ASMJIT_DEFINE_TYPE_ID(I16 , kIdI16);
ASMJIT_DEFINE_TYPE_ID(U16 , kIdU16);
ASMJIT_DEFINE_TYPE_ID(I32 , kIdI32);
ASMJIT_DEFINE_TYPE_ID(U32 , kIdU32);
ASMJIT_DEFINE_TYPE_ID(I64 , kIdI64);
ASMJIT_DEFINE_TYPE_ID(U64 , kIdU64);
ASMJIT_DEFINE_TYPE_ID(IPtr, kIdIntPtr);
ASMJIT_DEFINE_TYPE_ID(UPtr, kIdUIntPtr);
ASMJIT_DEFINE_TYPE_ID(F32 , kIdF32);
ASMJIT_DEFINE_TYPE_ID(F64 , kIdF64);
ASMJIT_DEFINE_TYPE_ID(void , TypeId::kVoid);
ASMJIT_DEFINE_TYPE_ID(Type::Bool , TypeId::kUInt8);
ASMJIT_DEFINE_TYPE_ID(Type::Int8 , TypeId::kInt8);
ASMJIT_DEFINE_TYPE_ID(Type::UInt8 , TypeId::kUInt8);
ASMJIT_DEFINE_TYPE_ID(Type::Int16 , TypeId::kInt16);
ASMJIT_DEFINE_TYPE_ID(Type::UInt16 , TypeId::kUInt16);
ASMJIT_DEFINE_TYPE_ID(Type::Int32 , TypeId::kInt32);
ASMJIT_DEFINE_TYPE_ID(Type::UInt32 , TypeId::kUInt32);
ASMJIT_DEFINE_TYPE_ID(Type::Int64 , TypeId::kInt64);
ASMJIT_DEFINE_TYPE_ID(Type::UInt64 , TypeId::kUInt64);
ASMJIT_DEFINE_TYPE_ID(Type::IntPtr , TypeId::kIntPtr);
ASMJIT_DEFINE_TYPE_ID(Type::UIntPtr, TypeId::kUIntPtr);
ASMJIT_DEFINE_TYPE_ID(Type::Float32, TypeId::kFloat32);
ASMJIT_DEFINE_TYPE_ID(Type::Float64, TypeId::kFloat64);
//! \endcond
//! \}
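A minimal compile-time sketch of the strong-typed mapping defined above, assuming the usual `<asmjit/core.h>` umbrella header; the `static_assert`s only restate what `TypeIdOfT_ByCategory`, the pointer specializations, and `ASMJIT_DEFINE_TYPE_ID` already encode:

```
#include <asmjit/core.h>
#include <cstdint>

using namespace asmjit;

// Native C++ types map to TypeId values at compile time.
static_assert(TypeUtils::typeIdOfT<int32_t>() == TypeId::kInt32,   "int32_t maps to kInt32");
static_assert(TypeUtils::typeIdOfT<float>()   == TypeId::kFloat32, "float maps to kFloat32");
static_assert(TypeUtils::typeIdOfT<void*>()   == TypeId::kUIntPtr, "pointers map to kUIntPtr");

// The Type::* tag structs registered by ASMJIT_DEFINE_TYPE_ID resolve the same way.
static_assert(TypeUtils::typeIdOfT<Type::UIntPtr>() == TypeId::kUIntPtr, "tag maps to kUIntPtr");
```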

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#ifndef ASMJIT_NO_JIT
@@ -44,9 +26,11 @@
// Apple recently introduced the MAP_JIT flag, which we want to use.
#if defined(__APPLE__)
#include <pthread.h>
#include <TargetConditionals.h>
#if TARGET_OS_OSX
#include <sys/utsname.h>
#include <libkern/OSCacheControl.h> // sys_icache_invalidate().
#endif
// Older SDKs don't define `MAP_JIT`.
#ifndef MAP_JIT
@@ -54,7 +38,7 @@
#endif
#endif
// BSD/OSX: `MAP_ANONYMOUS` is not defined, `MAP_ANON` is.
// BSD/MAC: `MAP_ANONYMOUS` is not defined, `MAP_ANON` is.
#if !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif
@@ -75,22 +59,25 @@
#define ASMJIT_VM_SHM_AVAILABLE 1
#endif
ASMJIT_BEGIN_NAMESPACE
#if defined(__APPLE__) && ASMJIT_ARCH_ARM >= 64
#define ASMJIT_HAS_PTHREAD_JIT_WRITE_PROTECT_NP
#endif
// ============================================================================
// [asmjit::VirtMem - Utilities]
// ============================================================================
ASMJIT_BEGIN_SUB_NAMESPACE(VirtMem)
static const uint32_t VirtMem_dualMappingFilter[2] = {
VirtMem::kAccessWrite | VirtMem::kMMapMaxAccessWrite,
VirtMem::kAccessExecute | VirtMem::kMMapMaxAccessExecute
// Virtual Memory Utilities
// ========================
static const MemoryFlags dualMappingFilter[2] = {
MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite,
MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute
};
// ============================================================================
// [asmjit::VirtMem - Virtual Memory [Windows]]
// ============================================================================
// Virtual Memory [Windows]
// ========================
#if defined(_WIN32)
struct ScopedHandle {
inline ScopedHandle() noexcept
: value(nullptr) {}
@@ -103,7 +90,7 @@ struct ScopedHandle {
HANDLE value;
};
static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
static void getVMInfo(Info& vmInfo) noexcept {
SYSTEM_INFO systemInfo;
::GetSystemInfo(&systemInfo);
@@ -111,15 +98,15 @@ static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
vmInfo.pageGranularity = systemInfo.dwAllocationGranularity;
}
// Returns windows-specific protectFlags from \ref VirtMem::Flags.
static DWORD VirtMem_winProtectFlagsFromFlags(uint32_t flags) noexcept {
// Returns Windows-specific protectFlags from \ref MemoryFlags.
static DWORD protectFlagsFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
DWORD protectFlags;
// READ|WRITE|EXECUTE.
if (flags & VirtMem::kAccessExecute)
protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
else if (flags & VirtMem::kAccessRW)
protectFlags = (flags & VirtMem::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY;
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute))
protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_EXECUTE_READWRITE : PAGE_EXECUTE_READ;
else if (Support::test(memoryFlags, MemoryFlags::kAccessRW))
protectFlags = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? PAGE_READWRITE : PAGE_READONLY;
else
protectFlags = PAGE_NOACCESS;
@@ -127,19 +114,23 @@ static DWORD VirtMem_winProtectFlagsFromFlags(uint32_t flags) noexcept {
return protectFlags;
}
static DWORD VirtMem_winDesiredAccessFromFlags(uint32_t flags) noexcept {
DWORD access = (flags & VirtMem::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ;
if (flags & VirtMem::kAccessExecute)
static DWORD desiredAccessFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
DWORD access = Support::test(memoryFlags, MemoryFlags::kAccessWrite) ? FILE_MAP_WRITE : FILE_MAP_READ;
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute))
access |= FILE_MAP_EXECUTE;
return access;
}
Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
return HardenedRuntimeFlags::kNone;
}
Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
*p = nullptr;
if (size == 0)
return DebugUtils::errored(kErrorInvalidArgument);
DWORD protectFlags = VirtMem_winProtectFlagsFromFlags(flags);
DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags);
void* result = ::VirtualAlloc(nullptr, size, MEM_COMMIT | MEM_RESERVE, protectFlags);
if (!result)
@@ -149,15 +140,15 @@ Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
return kErrorOk;
}
Error VirtMem::release(void* p, size_t size) noexcept {
Error release(void* p, size_t size) noexcept {
DebugUtils::unused(size);
if (ASMJIT_UNLIKELY(!::VirtualFree(p, 0, MEM_RELEASE)))
return DebugUtils::errored(kErrorInvalidArgument);
return kErrorOk;
}
Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
DWORD protectFlags = VirtMem_winProtectFlagsFromFlags(flags);
Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
DWORD protectFlags = protectFlagsFromMemoryFlags(memoryFlags);
DWORD oldFlags;
if (::VirtualProtect(p, size, protectFlags, &oldFlags))
@@ -166,8 +157,8 @@ Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
return DebugUtils::errored(kErrorInvalidArgument);
}
Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
dm->ro = nullptr;
Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) noexcept {
dm->rx = nullptr;
dm->rw = nullptr;
if (size == 0)
@@ -187,8 +178,8 @@ Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) no
void* ptr[2];
for (uint32_t i = 0; i < 2; i++) {
uint32_t accessFlags = flags & ~VirtMem_dualMappingFilter[i];
DWORD desiredAccess = VirtMem_winDesiredAccessFromFlags(accessFlags);
MemoryFlags accessFlags = memoryFlags & ~dualMappingFilter[i];
DWORD desiredAccess = desiredAccessFromMemoryFlags(accessFlags);
ptr[i] = ::MapViewOfFile(handle.value, desiredAccess, 0, 0, size);
if (ptr[i] == nullptr) {
@@ -198,37 +189,36 @@ Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) no
}
}
dm->ro = ptr[0];
dm->rx = ptr[0];
dm->rw = ptr[1];
return kErrorOk;
}
Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
Error releaseDualMapping(DualMapping* dm, size_t size) noexcept {
DebugUtils::unused(size);
bool failed = false;
if (!::UnmapViewOfFile(dm->ro))
if (!::UnmapViewOfFile(dm->rx))
failed = true;
if (dm->ro != dm->rw && !UnmapViewOfFile(dm->rw))
if (dm->rx != dm->rw && !UnmapViewOfFile(dm->rw))
failed = true;
if (failed)
return DebugUtils::errored(kErrorInvalidArgument);
dm->ro = nullptr;
dm->rx = nullptr;
dm->rw = nullptr;
return kErrorOk;
}
#endif
// ============================================================================
// [asmjit::VirtMem - Virtual Memory [Posix]]
// ============================================================================
// Virtual Memory [Posix]
// ======================
#if !defined(_WIN32)
static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
static void getVMInfo(Info& vmInfo) noexcept {
uint32_t pageSize = uint32_t(::getpagesize());
vmInfo.pageSize = pageSize;
@@ -236,14 +226,14 @@ static void VirtMem_getInfo(VirtMem::Info& vmInfo) noexcept {
}
#if !defined(SHM_ANON)
static const char* VirtMem_getTmpDir() noexcept {
static const char* getTmpDir() noexcept {
const char* tmpDir = getenv("TMPDIR");
return tmpDir ? tmpDir : "/tmp";
}
#endif
// Translates libc errors specific to VirtualMemory mapping to `asmjit::Error`.
static Error VirtMem_asmjitErrorFromErrno(int e) noexcept {
static Error asmjitErrorFromErrno(int e) noexcept {
switch (e) {
case EACCES:
case EAGAIN:
@@ -265,16 +255,14 @@ static Error VirtMem_asmjitErrorFromErrno(int e) noexcept {
}
}
// Some operating systems don't allow /dev/shm to be executable. On Linux this
// happens when /dev/shm is mounted with 'noexec', which is enforced by systemd.
// Other operating systems like MacOS also restrict executable permissions regarding
// /dev/shm, so we use a runtime detection before attempting to allocate executable
// memory. Sometimes we don't need the detection as we know it would always result
// in `kShmStrategyTmpDir`.
enum ShmStrategy : uint32_t {
kShmStrategyUnknown = 0,
kShmStrategyDevShm = 1,
kShmStrategyTmpDir = 2
// Some operating systems don't allow /dev/shm to be executable. On Linux this happens when /dev/shm is mounted with
// 'noexec', which is enforced by systemd. Other operating systems like MacOS also restrict executable permissions
// regarding /dev/shm, so we use a runtime detection before attempting to allocate executable memory. Sometimes we
// don't need the detection as we know it would always result in `ShmStrategy::kTmpDir`.
enum class ShmStrategy : uint32_t {
kUnknown = 0,
kDevShm = 1,
kTmpDir = 2
};
class AnonymousMemory {
@@ -289,17 +277,17 @@ public:
FileType _fileType;
StringTmp<128> _tmpName;
ASMJIT_INLINE AnonymousMemory() noexcept
inline AnonymousMemory() noexcept
: _fd(-1),
_fileType(kFileTypeNone),
_tmpName() {}
ASMJIT_INLINE ~AnonymousMemory() noexcept {
inline ~AnonymousMemory() noexcept {
unlink();
close();
}
ASMJIT_INLINE int fd() const noexcept { return _fd; }
inline int fd() const noexcept { return _fd; }
Error open(bool preferTmpOverDevShm) noexcept {
#if defined(__linux__) && defined(__NR_memfd_create)
@@ -319,7 +307,7 @@ public:
if (e == ENOSYS)
memfd_create_not_supported = 1;
else
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
return DebugUtils::errored(asmjitErrorFromErrno(e));
}
#endif
@@ -331,13 +319,12 @@ public:
if (ASMJIT_LIKELY(_fd >= 0))
return kErrorOk;
else
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(errno));
return DebugUtils::errored(asmjitErrorFromErrno(errno));
#else
// POSIX API. We have to generate somehow a unique name. This is nothing
// cryptographic, just using a bit from the stack address to always have
// a different base for different threads (as threads have their own stack)
// and retries for avoiding collisions. We use `shm_open()` with flags that
// require creation of the file so we never open an existing shared memory.
// POSIX API. We have to somehow generate a unique name. This is nothing cryptographic, just using a bit from
// the stack address to always have a different base for different threads (as threads have their own stack)
// and retries for avoiding collisions. We use `shm_open()` with flags that require creation of the file so we
// never open an existing shared memory.
static std::atomic<uint32_t> internalCounter;
const char* kShmFormat = "/shm-id-%016llX";
@@ -351,7 +338,7 @@ public:
bool useTmp = !ASMJIT_VM_SHM_DETECT || preferTmpOverDevShm;
if (useTmp) {
_tmpName.assign(VirtMem_getTmpDir());
_tmpName.assign(getTmpDir());
_tmpName.appendFormat(kShmFormat, (unsigned long long)bits);
_fd = ::open(_tmpName.data(), O_RDWR | O_CREAT | O_EXCL, 0);
if (ASMJIT_LIKELY(_fd >= 0)) {
@@ -372,7 +359,7 @@ public:
int e = errno;
if (e != EEXIST)
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
return DebugUtils::errored(asmjitErrorFromErrno(e));
}
return DebugUtils::errored(kErrorFailedToOpenAnonymousMemory);
@@ -406,17 +393,30 @@ public:
Error allocate(size_t size) noexcept {
// TODO: Improve this by using `posix_fallocate()` when available.
if (ftruncate(_fd, off_t(size)) != 0)
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(errno));
return DebugUtils::errored(asmjitErrorFromErrno(errno));
return kErrorOk;
}
};
// Returns `mmap()` protection flags from \ref MemoryFlags.
static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
int protection = 0;
if (Support::test(memoryFlags, MemoryFlags::kAccessRead)) protection |= PROT_READ;
if (Support::test(memoryFlags, MemoryFlags::kAccessWrite)) protection |= PROT_READ | PROT_WRITE;
if (Support::test(memoryFlags, MemoryFlags::kAccessExecute)) protection |= PROT_READ | PROT_EXEC;
return protection;
}
#if defined(__APPLE__)
// Detects whether the current process is hardened, which means that pages that
// have WRITE and EXECUTABLE flags cannot be allocated without MAP_JIT flag.
static ASMJIT_INLINE bool VirtMem_isHardened() noexcept {
static volatile uint32_t globalHardenedFlag;
// Detects whether the current process is hardened, which means that pages that have WRITE and EXECUTABLE flags cannot
// be allocated without the MAP_JIT flag.
static inline bool hasHardenedRuntimeMacOS() noexcept {
#if TARGET_OS_OSX && ASMJIT_ARCH_ARM >= 64
// MacOS on AArch64 always has hardened runtime enabled.
return true;
#else
static std::atomic<uint32_t> globalHardenedFlag;
enum HardenedFlag : uint32_t {
kHardenedFlagUnknown = 0,
@@ -424,37 +424,40 @@ static ASMJIT_INLINE bool VirtMem_isHardened() noexcept {
kHardenedFlagEnabled = 2
};
uint32_t flag = globalHardenedFlag;
uint32_t flag = globalHardenedFlag.load();
if (flag == kHardenedFlagUnknown) {
VirtMem::Info memInfo;
VirtMem_getInfo(memInfo);
size_t pageSize = ::getpagesize();
void* ptr = mmap(nullptr, memInfo.pageSize, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
void* ptr = mmap(nullptr, pageSize, PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (ptr == MAP_FAILED) {
flag = kHardenedFlagEnabled;
}
else {
flag = kHardenedFlagDisabled;
munmap(ptr, memInfo.pageSize);
munmap(ptr, pageSize);
}
globalHardenedFlag = flag;
globalHardenedFlag.store(flag);
}
return flag == kHardenedFlagEnabled;
#endif
}
// MAP_JIT flag required to run unsigned JIT code is only supported by kernel
// version 10.14+ (Mojave) and IOS.
static ASMJIT_INLINE bool VirtMem_hasMapJitSupport() noexcept {
#if TARGET_OS_OSX
static volatile int globalVersion;
static inline bool hasMapJitSupportMacOS() noexcept {
#if TARGET_OS_OSX && ASMJIT_ARCH_ARM >= 64
// MacOS on the AArch64 architecture always uses hardened runtime. Some documentation can be found here:
// - https://developer.apple.com/documentation/apple_silicon/porting_just-in-time_compilers_to_apple_silicon
return true;
#elif TARGET_OS_OSX
// The MAP_JIT flag required to run unsigned JIT code is only supported by kernel version 10.14+ (Mojave) and iOS.
static std::atomic<uint32_t> globalVersion;
int ver = globalVersion;
int ver = globalVersion.load();
if (!ver) {
struct utsname osname;
struct utsname osname {};
uname(&osname);
ver = atoi(osname.release);
globalVersion = ver;
globalVersion.store(ver);
}
return ver >= 18;
#else
@@ -462,53 +465,63 @@ static ASMJIT_INLINE bool VirtMem_hasMapJitSupport() noexcept {
return true;
#endif
}
#endif
#endif // __APPLE__
// Returns `mmap()` protection flags from \ref VirtMem::Flags.
static int VirtMem_mmProtFromFlags(uint32_t flags) noexcept {
int protection = 0;
if (flags & VirtMem::kAccessRead) protection |= PROT_READ;
if (flags & VirtMem::kAccessWrite) protection |= PROT_READ | PROT_WRITE;
if (flags & VirtMem::kAccessExecute) protection |= PROT_READ | PROT_EXEC;
return protection;
// Detects whether the current process is hardened, which means that pages that have WRITE and EXECUTABLE flags
// cannot normally be allocated. On MacOS such an allocation requires the MAP_JIT flag.
static inline bool hasHardenedRuntime() noexcept {
#if defined(__APPLE__)
return hasHardenedRuntimeMacOS();
#else
return false;
#endif
}
// Detects whether MAP_JIT is available.
static inline bool hasMapJitSupport() noexcept {
#if defined(__APPLE__)
return hasMapJitSupportMacOS();
#else
return false;
#endif
}
// Returns either MAP_JIT or 0 based on `flags` and the host operating system.
static ASMJIT_INLINE int VirtMem_mmMapJitFromFlags(uint32_t flags) noexcept {
static inline int mmMapJitFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
#if defined(__APPLE__)
// Always use MAP_JIT flag if user asked for it (could be used for testing
// on non-hardened processes) and detect whether it must be used when the
// process is actually hardened (in that case it doesn't make sense to rely
// on user `flags`).
bool useMapJit = (flags & VirtMem::kMMapEnableMapJit) != 0 || VirtMem_isHardened();
// Always use MAP_JIT flag if user asked for it (could be used for testing on non-hardened processes) and detect
// whether it must be used when the process is actually hardened (in that case it doesn't make sense to rely on
// user `memoryFlags`).
bool useMapJit = Support::test(memoryFlags, MemoryFlags::kMMapEnableMapJit) || hasHardenedRuntime();
if (useMapJit)
return VirtMem_hasMapJitSupport() ? int(MAP_JIT) : 0;
return hasMapJitSupport() ? int(MAP_JIT) : 0;
else
return 0;
#else
DebugUtils::unused(flags);
DebugUtils::unused(memoryFlags);
return 0;
#endif
}
// Returns BSD-specific `PROT_MAX()` flags.
static ASMJIT_INLINE int VirtMem_mmMaxProtFromFlags(uint32_t flags) noexcept {
static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
#if defined(PROT_MAX)
static constexpr uint32_t kMaxProtShift = Support::constCtz(VirtMem::kMMapMaxAccessRead);
if (flags & (VirtMem::kMMapMaxAccessReadWrite | VirtMem::kMMapMaxAccessExecute))
return PROT_MAX(VirtMem_mmProtFromFlags(flags >> kMaxProtShift));
static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
if (Support::test(memoryFlags, MemoryFlags::kMMapMaxAccessReadWrite | MemoryFlags::kMMapMaxAccessExecute))
return PROT_MAX(mmProtFromMemoryFlags((MemoryFlags)(uint32_t(memoryFlags) >> kMaxProtShift)));
else
return 0;
#else
DebugUtils::unused(flags);
DebugUtils::unused(memoryFlags);
return 0;
#endif
}
#if ASMJIT_VM_SHM_DETECT
static Error VirtMem_detectShmStrategy(uint32_t* strategyOut) noexcept {
static Error detectShmStrategy(ShmStrategy* strategyOut) noexcept {
AnonymousMemory anonMem;
VirtMem::Info vmInfo = VirtMem::info();
Info vmInfo = info();
ASMJIT_PROPAGATE(anonMem.open(false));
ASMJIT_PROPAGATE(anonMem.allocate(vmInfo.pageSize));
@@ -517,46 +530,57 @@ static Error VirtMem_detectShmStrategy(uint32_t* strategyOut) noexcept {
if (ptr == MAP_FAILED) {
int e = errno;
if (e == EINVAL) {
*strategyOut = kShmStrategyTmpDir;
*strategyOut = ShmStrategy::kTmpDir;
return kErrorOk;
}
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
return DebugUtils::errored(asmjitErrorFromErrno(e));
}
else {
munmap(ptr, vmInfo.pageSize);
*strategyOut = kShmStrategyDevShm;
*strategyOut = ShmStrategy::kDevShm;
return kErrorOk;
}
}
#endif
static Error VirtMem_getShmStrategy(uint32_t* strategyOut) noexcept {
static Error getShmStrategy(ShmStrategy* strategyOut) noexcept {
#if ASMJIT_VM_SHM_DETECT
// Initially don't assume anything. It has to be tested whether
// '/dev/shm' was mounted with 'noexec' flag or not.
static volatile uint32_t globalShmStrategy = kShmStrategyUnknown;
// Initially don't assume anything. It has to be tested whether '/dev/shm' was mounted with 'noexec' flag or not.
static std::atomic<uint32_t> globalShmStrategy;
uint32_t strategy = globalShmStrategy;
if (strategy == kShmStrategyUnknown) {
ASMJIT_PROPAGATE(VirtMem_detectShmStrategy(&strategy));
globalShmStrategy = strategy;
ShmStrategy strategy = static_cast<ShmStrategy>(globalShmStrategy.load());
if (strategy == ShmStrategy::kUnknown) {
ASMJIT_PROPAGATE(detectShmStrategy(&strategy));
globalShmStrategy.store(static_cast<uint32_t>(strategy));
}
*strategyOut = strategy;
return kErrorOk;
#else
*strategyOut = kShmStrategyTmpDir;
*strategyOut = ShmStrategy::kTmpDir;
return kErrorOk;
#endif
}
Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
HardenedRuntimeFlags hrFlags = HardenedRuntimeFlags::kNone;
if (hasHardenedRuntime())
hrFlags |= HardenedRuntimeFlags::kEnabled;
if (hasMapJitSupport())
hrFlags |= HardenedRuntimeFlags::kMapJit;
return hrFlags;
}
Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
*p = nullptr;
if (size == 0)
return DebugUtils::errored(kErrorInvalidArgument);
int protection = VirtMem_mmProtFromFlags(flags) | VirtMem_mmMaxProtFromFlags(flags);
int mmFlags = MAP_PRIVATE | MAP_ANONYMOUS | VirtMem_mmMapJitFromFlags(flags);
int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags);
int mmFlags = MAP_PRIVATE | MAP_ANONYMOUS | mmMapJitFromMemoryFlags(memoryFlags);
void* ptr = mmap(nullptr, size, protection, mmFlags, -1, 0);
if (ptr == MAP_FAILED)
@@ -566,7 +590,7 @@ Error VirtMem::alloc(void** p, size_t size, uint32_t flags) noexcept {
return kErrorOk;
}
Error VirtMem::release(void* p, size_t size) noexcept {
Error release(void* p, size_t size) noexcept {
if (ASMJIT_UNLIKELY(munmap(p, size) != 0))
return DebugUtils::errored(kErrorInvalidArgument);
@@ -574,26 +598,26 @@ Error VirtMem::release(void* p, size_t size) noexcept {
}
Error VirtMem::protect(void* p, size_t size, uint32_t flags) noexcept {
int protection = VirtMem_mmProtFromFlags(flags);
Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
int protection = mmProtFromMemoryFlags(memoryFlags);
if (mprotect(p, size, protection) == 0)
return kErrorOk;
return DebugUtils::errored(kErrorInvalidArgument);
}
Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept {
dm->ro = nullptr;
Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) noexcept {
dm->rx = nullptr;
dm->rw = nullptr;
if (off_t(size) <= 0)
return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);
bool preferTmpOverDevShm = (flags & kMappingPreferTmp) != 0;
bool preferTmpOverDevShm = Support::test(memoryFlags, MemoryFlags::kMappingPreferTmp);
if (!preferTmpOverDevShm) {
uint32_t strategy;
ASMJIT_PROPAGATE(VirtMem_getShmStrategy(&strategy));
preferTmpOverDevShm = (strategy == kShmStrategyTmpDir);
ShmStrategy strategy;
ASMJIT_PROPAGATE(getShmStrategy(&strategy));
preferTmpOverDevShm = (strategy == ShmStrategy::kTmpDir);
}
AnonymousMemory anonMem;
@@ -602,8 +626,8 @@ Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) no
void* ptr[2];
for (uint32_t i = 0; i < 2; i++) {
uint32_t accessFlags = flags & ~VirtMem_dualMappingFilter[i];
int protection = VirtMem_mmProtFromFlags(accessFlags) | VirtMem_mmMaxProtFromFlags(accessFlags);
MemoryFlags accessFlags = memoryFlags & ~dualMappingFilter[i];
int protection = mmProtFromMemoryFlags(accessFlags) | mmMaxProtFromMemoryFlags(accessFlags);
ptr[i] = mmap(nullptr, size, protection, MAP_SHARED, anonMem.fd(), 0);
if (ptr[i] == MAP_FAILED) {
@@ -611,40 +635,61 @@ Error VirtMem::allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) no
int e = errno;
if (i == 1)
munmap(ptr[0], size);
return DebugUtils::errored(VirtMem_asmjitErrorFromErrno(e));
return DebugUtils::errored(asmjitErrorFromErrno(e));
}
}
dm->ro = ptr[0];
dm->rx = ptr[0];
dm->rw = ptr[1];
return kErrorOk;
}
Error VirtMem::releaseDualMapping(DualMapping* dm, size_t size) noexcept {
Error err = release(dm->ro, size);
if (dm->ro != dm->rw)
Error releaseDualMapping(DualMapping* dm, size_t size) noexcept {
Error err = release(dm->rx, size);
if (dm->rx != dm->rw)
err |= release(dm->rw, size);
if (err)
return DebugUtils::errored(kErrorInvalidArgument);
dm->ro = nullptr;
dm->rx = nullptr;
dm->rw = nullptr;
return kErrorOk;
}
#endif
// ============================================================================
// [asmjit::VirtMem - Virtual Memory [Memory Info]]
// ============================================================================
// Virtual Memory - Flush Instruction Cache
// ========================================
VirtMem::Info VirtMem::info() noexcept {
static VirtMem::Info vmInfo;
void flushInstructionCache(void* p, size_t size) noexcept {
#if ASMJIT_ARCH_X86
// The X86/X86_64 architecture doesn't require anything to be done to flush the instruction cache.
DebugUtils::unused(p, size);
#elif defined(__APPLE__)
sys_icache_invalidate(p, size);
#elif defined(_WIN32)
// Windows has built-in support in `kernel32.dll`.
FlushInstructionCache(GetCurrentProcess(), p, size);
#elif defined(__GNUC__)
char* start = static_cast<char*>(p);
char* end = start + size;
__builtin___clear_cache(start, end);
#else
#pragma message("asmjit::VirtMem::flushInstructionCache() doesn't have implementation for the target OS and compiler")
DebugUtils::unused(p, size);
#endif
}
// Virtual Memory - Memory Info
// ============================
Info info() noexcept {
static std::atomic<uint32_t> vmInfoInitialized;
static Info vmInfo;
if (!vmInfoInitialized.load()) {
VirtMem::Info localMemInfo;
VirtMem_getInfo(localMemInfo);
Info localMemInfo;
getVMInfo(localMemInfo);
vmInfo = localMemInfo;
vmInfoInitialized.store(1u);
@@ -653,6 +698,24 @@ VirtMem::Info VirtMem::info() noexcept {
return vmInfo;
}
ASMJIT_END_NAMESPACE
// Virtual Memory - Hardened Runtime Info
// ======================================
HardenedRuntimeInfo hardenedRuntimeInfo() noexcept {
return HardenedRuntimeInfo { getHardenedRuntimeFlags() };
}
// Virtual Memory - Protect JIT Memory
// ===================================
void protectJitMemory(ProtectJitAccess access) noexcept {
#if defined(ASMJIT_HAS_PTHREAD_JIT_WRITE_PROTECT_NP)
pthread_jit_write_protect_np(static_cast<uint32_t>(access));
#else
DebugUtils::unused(access);
#endif
}
ASMJIT_END_SUB_NAMESPACE
#endif

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_VIRTMEM_H_INCLUDED
#define ASMJIT_CORE_VIRTMEM_H_INCLUDED
@@ -34,65 +16,14 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_virtual_memory
//! \{
// ============================================================================
// [asmjit::VirtMem]
// ============================================================================
//! Virtual memory management.
namespace VirtMem {
//! Virtual memory access and mmap-specific flags.
enum Flags : uint32_t {
//! No access flags.
kAccessNone = 0x00000000u,
//! Memory is readable.
kAccessRead = 0x00000001u,
//! Memory is writable.
kAccessWrite = 0x00000002u,
//! Memory is executable.
kAccessExecute = 0x00000004u,
//! A combination of \ref kAccessRead and \ref kAccessWrite.
kAccessReadWrite = kAccessRead | kAccessWrite,
//! A combination of \ref kAccessRead, \ref kAccessWrite.
kAccessRW = kAccessRead | kAccessWrite,
//! A combination of \ref kAccessRead and \ref kAccessExecute.
kAccessRX = kAccessRead | kAccessExecute,
//! A combination of \ref kAccessRead, \ref kAccessWrite, and \ref kAccessExecute.
kAccessRWX = kAccessRead | kAccessWrite | kAccessExecute,
//! Use a `MAP_JIT` flag available on Apple platforms (introduced by Mojave),
//! which allows JIT code to be executed in MAC bundles. This flag is not turned
//! on by default, because when a process uses `fork()` the child process
//! has no access to the pages mapped with `MAP_JIT`, which could break code
//! that doesn't expect this behavior.
kMMapEnableMapJit = 0x00000010u,
//! Pass `PROT_MAX(PROT_READ)` to mmap() on platforms that support `PROT_MAX`.
kMMapMaxAccessRead = 0x00000020u,
//! Pass `PROT_MAX(PROT_WRITE)` to mmap() on platforms that support `PROT_MAX`.
kMMapMaxAccessWrite = 0x00000040u,
//! Pass `PROT_MAX(PROT_EXEC)` to mmap() on platforms that support `PROT_MAX`.
kMMapMaxAccessExecute = 0x00000080u,
//! A combination of \ref kMMapMaxAccessRead and \ref kMMapMaxAccessWrite.
kMMapMaxAccessReadWrite = kMMapMaxAccessRead | kMMapMaxAccessWrite,
//! A combination of \ref kMMapMaxAccessRead and \ref kMMapMaxAccessWrite.
kMMapMaxAccessRW = kMMapMaxAccessRead | kMMapMaxAccessWrite,
//! A combination of \ref kMMapMaxAccessRead and \ref kMMapMaxAccessExecute.
kMMapMaxAccessRX = kMMapMaxAccessRead | kMMapMaxAccessExecute,
//! A combination of \ref kMMapMaxAccessRead, \ref kMMapMaxAccessWrite, \ref kMMapMaxAccessExecute.
kMMapMaxAccessRWX = kMMapMaxAccessRead | kMMapMaxAccessWrite | kMMapMaxAccessExecute,
//! Not an access flag, only used by `allocDualMapping()` to override the
//! default allocation strategy to always use a 'tmp' directory instead of
//! "/dev/shm" (on POSIX platforms). Please note that this flag will be
//! ignored if the operating system allows to allocate an executable memory
//! by a different API than `open()` or `shm_open()`. For example on Linux
//! `memfd_create()` is preferred and on BSDs `shm_open(SHM_ANON, ...)` is
//! used if SHM_ANON is defined.
kMappingPreferTmp = 0x80000000u
};
//! Flushes instruction cache in the given region.
//!
//! Only useful on non-x86 architectures; however, it's good practice to call it on any platform to make your
//! code more portable.
ASMJIT_API void flushInstructionCache(void* p, size_t size) noexcept;
//! Virtual memory information.
struct Info {
@@ -102,59 +33,205 @@ struct Info {
uint32_t pageGranularity;
};
//! Dual memory mapping used to map an anonymous memory into two memory regions
//! where one region is read-only, but executable, and the second region is
//! read+write, but not executable. Please see \ref VirtMem::allocDualMapping()
//! for more details.
struct DualMapping {
//! Pointer to data with 'Read' or 'Read+Execute' access.
void* ro;
//! Pointer to data with 'Read-Write' access, but never 'Write+Execute'.
void* rw;
};
//! Returns virtual memory information, see `VirtMem::Info` for more details.
ASMJIT_API Info info() noexcept;
//! Allocates virtual memory by either using `mmap()` (POSIX) or `VirtualAlloc()`
//! (Windows).
//! Virtual memory access and mmap-specific flags.
enum class MemoryFlags : uint32_t {
//! No flags.
kNone = 0,
//! Memory is readable.
kAccessRead = 0x00000001u,
//! Memory is writable.
kAccessWrite = 0x00000002u,
//! Memory is executable.
kAccessExecute = 0x00000004u,
//! A combination of \ref MemoryFlags::kAccessRead and \ref MemoryFlags::kAccessWrite.
kAccessReadWrite = kAccessRead | kAccessWrite,
//! A combination of \ref MemoryFlags::kAccessRead, \ref MemoryFlags::kAccessWrite.
kAccessRW = kAccessRead | kAccessWrite,
//! A combination of \ref MemoryFlags::kAccessRead and \ref MemoryFlags::kAccessExecute.
kAccessRX = kAccessRead | kAccessExecute,
//! A combination of \ref MemoryFlags::kAccessRead, \ref MemoryFlags::kAccessWrite, and
//! \ref MemoryFlags::kAccessExecute.
kAccessRWX = kAccessRead | kAccessWrite | kAccessExecute,
//! Use the `MAP_JIT` flag available on Apple platforms (introduced by Mojave), which allows JIT code to be executed
//! in Mac application bundles. This flag is not turned on by default, because when a process uses `fork()` the child process
//! has no access to the pages mapped with `MAP_JIT`, which could break code that doesn't expect this behavior.
//!
//! \note This flag can only be used with \ref VirtMem::alloc().
kMMapEnableMapJit = 0x00000010u,
//! Pass `PROT_MAX(PROT_READ)` to mmap() on platforms that support `PROT_MAX`.
//!
//! \note This flag can only be used with \ref VirtMem::alloc().
kMMapMaxAccessRead = 0x00000020u,
//! Pass `PROT_MAX(PROT_WRITE)` to mmap() on platforms that support `PROT_MAX`.
//!
//! \note This flag can only be used with \ref VirtMem::alloc().
kMMapMaxAccessWrite = 0x00000040u,
//! Pass `PROT_MAX(PROT_EXEC)` to mmap() on platforms that support `PROT_MAX`.
//!
//! \note This flag can only be used with \ref VirtMem::alloc().
kMMapMaxAccessExecute = 0x00000080u,
//! A combination of \ref MemoryFlags::kMMapMaxAccessRead and \ref MemoryFlags::kMMapMaxAccessWrite.
kMMapMaxAccessReadWrite = kMMapMaxAccessRead | kMMapMaxAccessWrite,
//! A combination of \ref MemoryFlags::kMMapMaxAccessRead and \ref MemoryFlags::kMMapMaxAccessWrite.
kMMapMaxAccessRW = kMMapMaxAccessRead | kMMapMaxAccessWrite,
//! A combination of \ref MemoryFlags::kMMapMaxAccessRead and \ref MemoryFlags::kMMapMaxAccessExecute.
kMMapMaxAccessRX = kMMapMaxAccessRead | kMMapMaxAccessExecute,
//! A combination of \ref MemoryFlags::kMMapMaxAccessRead, \ref MemoryFlags::kMMapMaxAccessWrite, and
//! \ref MemoryFlags::kMMapMaxAccessExecute.
kMMapMaxAccessRWX = kMMapMaxAccessRead | kMMapMaxAccessWrite | kMMapMaxAccessExecute,
//! Not an access flag, only used by `allocDualMapping()` to override the default allocation strategy to always use
//! a 'tmp' directory instead of "/dev/shm" (on POSIX platforms). Please note that this flag will be ignored if the
//! operating system allows allocating executable memory through an API other than `open()` or `shm_open()`. For
//! example on Linux `memfd_create()` is preferred and on BSDs `shm_open(SHM_ANON, ...)` is used if SHM_ANON is
//! defined.
//!
//! \note This flag can only be used with \ref VirtMem::alloc().
kMappingPreferTmp = 0x80000000u
};
ASMJIT_DEFINE_ENUM_FLAGS(MemoryFlags)
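A short user-side sketch, assuming `ASMJIT_DEFINE_ENUM_FLAGS` supplies the usual bitwise operators for the strong-typed enum and that `Support::test()` (used throughout the implementation above) is available from the public headers:

```
// Combine access flags with PROT_MAX hints, then test for a particular bit.
VirtMem::MemoryFlags flags = VirtMem::MemoryFlags::kAccessRW |
                             VirtMem::MemoryFlags::kMMapMaxAccessRWX;

bool isWritable = Support::test(flags, VirtMem::MemoryFlags::kAccessWrite); // true
```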
//! Allocates virtual memory by either using `mmap()` (POSIX) or `VirtualAlloc()` (Windows).
//!
//! \note `size` should be aligned to page size, use \ref VirtMem::info()
//! to obtain it. Invalid size will not be corrected by the implementation
//! and the allocation would not succeed in such case.
ASMJIT_API Error alloc(void** p, size_t size, uint32_t flags) noexcept;
//! \note `size` should be aligned to page size, use \ref VirtMem::info() to obtain it. Invalid size will not be
//! corrected by the implementation and the allocation will not succeed in such a case.
ASMJIT_API Error alloc(void** p, size_t size, MemoryFlags flags) noexcept;
//! Releases virtual memory previously allocated by \ref VirtMem::alloc().
//!
//! \note The size must be the same as used by \ref VirtMem::alloc(). If the
//! size is not the same value the call will fail on any POSIX system, but
//! pass on Windows, because it's implemented differently.
//! \note The size must be the same as used by \ref VirtMem::alloc(). If the size is not the same value the call
//! will fail on any POSIX system, but pass on Windows, because it's implemented differently.
ASMJIT_API Error release(void* p, size_t size) noexcept;
//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()`
//! (Windows).
ASMJIT_API Error protect(void* p, size_t size, uint32_t flags) noexcept;
//! A cross-platform wrapper around `mprotect()` (POSIX) and `VirtualProtect()` (Windows).
ASMJIT_API Error protect(void* p, size_t size, MemoryFlags flags) noexcept;
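A minimal usage sketch of `alloc()`, `protect()`, and `release()` declared above; error handling is written out explicitly and the page-rounding helper is the caller's own, not part of the API:

```
#include <asmjit/core.h>
#include <cstring>

using namespace asmjit;

// Copies `codeSize` bytes of machine code into fresh memory and flips it to Read+Execute.
Error makeExecutableCopy(void** out, const void* code, size_t codeSize) {
  size_t pageSize  = VirtMem::info().pageSize;
  size_t allocSize = ((codeSize + pageSize - 1) / pageSize) * pageSize; // round up to page size

  void* p = nullptr;
  Error err = VirtMem::alloc(&p, allocSize, VirtMem::MemoryFlags::kAccessReadWrite);
  if (err != kErrorOk)
    return err;

  memcpy(p, code, codeSize);                               // fill while the mapping is RW
  err = VirtMem::protect(p, allocSize, VirtMem::MemoryFlags::kAccessRX);
  if (err != kErrorOk) {
    VirtMem::release(p, allocSize);
    return err;
  }

  VirtMem::flushInstructionCache(p, codeSize);             // no-op on x86, required elsewhere
  *out = p;                                                // release later with VirtMem::release(p, allocSize)
  return kErrorOk;
}
```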
//! Allocates virtual memory and creates two views of it where the first view
//! has no write access. This is an addition to the API that should be used
//! in cases in which the operating system either enforces W^X security policy
//! or the application wants to use this policy by default to improve security
//! and prevent an accidental (or purposed) self-modifying code.
//! Dual memory mapping used to map an anonymous memory into two memory regions where one region is read-only, but
//! executable, and the second region is read+write, but not executable. See \ref VirtMem::allocDualMapping() for
//! more details.
struct DualMapping {
//! Pointer to data with 'Read+Execute' access (this memory is not writable).
void* rx;
//! Pointer to data with 'Read+Write' access (this memory is not executable).
void* rw;
};
//! Allocates virtual memory and creates two views of it where the first view has no write access. This is an addition
//! to the API that should be used in cases in which the operating system either enforces W^X security policy or the
//! application wants to use this policy by default to improve security and prevent accidental (or deliberate)
//! self-modifying code.
//!
//! The memory returned in the `dm` are two independent mappings of the same
//! shared memory region. You must use \ref VirtMem::releaseDualMapping() to
//! release it when it's no longer needed. Never use `VirtMem::release()` to
//! release the memory returned by `allocDualMapping()` as that would fail on
//! Windows.
//! The two pointers returned in `dm` are independent mappings of the same shared memory region. You must use
//! \ref VirtMem::releaseDualMapping() to release it when it's no longer needed. Never use `VirtMem::release()` to
//! release the memory returned by `allocDualMapping()` as that would fail on Windows.
//!
//! \remarks Both pointers in `dm` will be set to `nullptr` if the function fails.
ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, uint32_t flags) noexcept;
ASMJIT_API Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags flags) noexcept;
//! Releases virtual memory mapping previously allocated by \ref VirtMem::allocDualMapping().
//!
//! \remarks Both pointers in `dm` will be set to `nullptr` if the function succeeds.
ASMJIT_API Error releaseDualMapping(DualMapping* dm, size_t size) noexcept;
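A companion sketch for the dual-mapping path, reusing the includes and `using namespace asmjit;` from the previous example: code is written through the `rw` view and executed through the `rx` view.

```
// Request RWX; the implementation strips write access from `rx` and execute access from `rw`.
Error emitViaDualMapping(const void* code, size_t codeSize, size_t allocSize) {
  VirtMem::DualMapping dm {};
  Error err = VirtMem::allocDualMapping(&dm, allocSize, VirtMem::MemoryFlags::kAccessRWX);
  if (err != kErrorOk)
    return err;

  memcpy(dm.rw, code, codeSize);                   // write through the RW view
  VirtMem::flushInstructionCache(dm.rx, codeSize); // the RX view is what gets executed

  // ... call into `dm.rx`, then:
  return VirtMem::releaseDualMapping(&dm, allocSize);
}
```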
//! Hardened runtime flags.
enum class HardenedRuntimeFlags : uint32_t {
//! No flags.
kNone = 0,
//! Hardened runtime is enabled - it's not possible to have "Write & Execute" memory protection. The runtime
//! enforces W^X (either write or execute).
//!
//! \note If the runtime is hardened it means that an operating system specific protection is used. For example on
//! MacOS platform it's possible to allocate memory with MAP_JIT flag and then use `pthread_jit_write_protect_np()`
//! to temporarily swap access permissions for the current thread. Dual mapping is also a possibility on X86/X64
//! architecture.
kEnabled = 0x00000001u,
//! Read+Write+Execute can only be allocated with MAP_JIT flag (Apple specific).
kMapJit = 0x00000002u
};
ASMJIT_DEFINE_ENUM_FLAGS(HardenedRuntimeFlags)
//! Hardened runtime information.
struct HardenedRuntimeInfo {
//! Hardened runtime flags.
HardenedRuntimeFlags flags;
};
//! Returns runtime features provided by the OS.
ASMJIT_API HardenedRuntimeInfo hardenedRuntimeInfo() noexcept;
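A short sketch of how a caller might branch on this information (names exactly as declared above; `Support::test()` as used in the implementation file):

```
VirtMem::HardenedRuntimeInfo rti = VirtMem::hardenedRuntimeInfo();

if (Support::test(rti.flags, VirtMem::HardenedRuntimeFlags::kMapJit)) {
  // RWX mappings are possible with MAP_JIT and per-thread write protection.
}
else if (Support::test(rti.flags, VirtMem::HardenedRuntimeFlags::kEnabled)) {
  // W^X is enforced and MAP_JIT is unavailable - prefer dual mapping.
}
else {
  // No hardening - plain RWX mappings are allowed.
}
```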
//! Values that can be used with `protectJitMemory()` function.
enum class ProtectJitAccess : uint32_t {
//! Protect JIT memory with Read+Write permissions.
kReadWrite = 0,
//! Protect JIT memory with Read+Execute permissions.
kReadExecute = 1
};
//! Protects access to memory mapped with the MAP_JIT flag for the current thread.
//!
//! \note This feature is only available on Apple hardware (AArch64) at the moment and uses a non-portable
//! `pthread_jit_write_protect_np()` call when available.
//!
//! This function must be called before and after memory mapped with the MAP_JIT flag is modified. Example:
//!
//! ```
//! void* codePtr = ...;
//! size_t codeSize = ...;
//!
//! VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadWrite);
//! memcpy(codePtr, source, codeSize);
//! VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadExecute);
//! VirtMem::flushInstructionCache(codePtr, codeSize);
//! ```
//!
//! See \ref ProtectJitReadWriteScope, which makes it simpler than the code above.
ASMJIT_API void protectJitMemory(ProtectJitAccess access) noexcept;
//! JIT protection scope that prepares the given memory block to be written to in the current thread.
//!
//! It calls `VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadWrite)` at construction time and
//! `VirtMem::protectJitMemory(VirtMem::ProtectJitAccess::kReadExecute)` combined with `flushInstructionCache()`
//! in the destructor. The purpose of this class is to make writing to JIT memory easier.
class ProtectJitReadWriteScope {
public:
void* _rxPtr;
size_t _size;
//! Makes the given memory block RW protected.
ASMJIT_FORCE_INLINE ProtectJitReadWriteScope(void* rxPtr, size_t size) noexcept
: _rxPtr(rxPtr),
_size(size) {
protectJitMemory(ProtectJitAccess::kReadWrite);
}
// Not copyable.
ProtectJitReadWriteScope(const ProtectJitReadWriteScope& other) = delete;
//! Makes the memory block RX protected again and flushes instruction cache.
ASMJIT_FORCE_INLINE ~ProtectJitReadWriteScope() noexcept {
protectJitMemory(ProtectJitAccess::kReadExecute);
flushInstructionCache(_rxPtr, _size);
}
};
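A usage sketch of the scope class above, assuming `rxPtr` points at memory mapped with `MAP_JIT` and that `<cstring>` is included; on platforms without `pthread_jit_write_protect_np()` the protection calls are no-ops:

```
void copyIntoJitMemory(void* rxPtr, const void* source, size_t codeSize) {
  VirtMem::ProtectJitReadWriteScope scope(rxPtr, codeSize); // flips the current thread to RW
  memcpy(rxPtr, source, codeSize);                          // writing is permitted inside the scope
} // destructor flips back to RX and flushes the instruction cache
```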
} // VirtMem
//! \}

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
@@ -27,17 +9,15 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::Zone - Statics]
// ============================================================================
// Zone - Globals
// ==============
// Zero size block used by `Zone` that doesn't have any memory allocated.
// Should be allocated in read-only memory and should never be modified.
// Zero size block used by `Zone` that doesn't have any memory allocated. Should be allocated in read-only memory
// and should never be modified.
const Zone::Block Zone::_zeroBlock = { nullptr, nullptr, 0 };
// ============================================================================
// [asmjit::Zone - Init / Reset]
// ============================================================================
// Zone - Init & Reset
// ===================
void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
ASMJIT_ASSERT(blockSize >= kMinBlockSize);
@@ -66,28 +46,27 @@ void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Tempora
}
}
void Zone::reset(uint32_t resetPolicy) noexcept {
void Zone::reset(ResetPolicy resetPolicy) noexcept {
Block* cur = _block;
// Can't be altered.
if (cur == &_zeroBlock)
return;
if (resetPolicy == Globals::kResetHard) {
if (resetPolicy == ResetPolicy::kHard) {
Block* initial = const_cast<Zone::Block*>(&_zeroBlock);
_ptr = initial->data();
_end = initial->data();
_block = initial;
// Since cur can be in the middle of the double-linked list, we have to
// traverse both directions (`prev` and `next`) separately to visit all.
// Since cur can be in the middle of the double-linked list, we have to traverse both directions (`prev` and
// `next`) separately to visit all.
Block* next = cur->next;
do {
Block* prev = cur->prev;
// If this is the first block and this ZoneTmp is temporary then the
// first block is statically allocated. We cannot free it and it makes
// sense to keep it even when this is hard reset.
// If this is the first block and this ZoneTmp is temporary then the first block is statically allocated.
// We cannot free it and it makes sense to keep it even during a hard reset.
if (prev == nullptr && _isTemporary) {
cur->prev = nullptr;
cur->next = nullptr;
@@ -113,9 +92,8 @@ void Zone::reset(uint32_t resetPolicy) noexcept {
}
}
// ============================================================================
// [asmjit::Zone - Alloc]
// ============================================================================
// Zone - Alloc
// ============
void* Zone::_alloc(size_t size, size_t alignment) noexcept {
Block* curBlock = _block;
@@ -124,10 +102,9 @@ void* Zone::_alloc(size_t size, size_t alignment) noexcept {
size_t rawBlockAlignment = blockAlignment();
size_t minimumAlignment = Support::max<size_t>(alignment, rawBlockAlignment);
// If the `Zone` has been cleared the current block doesn't have to be the
// last one. Check if there is a block that can be used instead of allocating
// a new one. If there is a `next` block it's completely unused, we don't have
// to check for remaining bytes in that case.
// If the `Zone` has been cleared the current block doesn't have to be the last one. Check if there is a block
// that can be used instead of allocating a new one. If there is a `next` block it's completely unused, we don't
// have to check for remaining bytes in that case.
if (next) {
uint8_t* ptr = Support::alignUp(next->data(), minimumAlignment);
uint8_t* end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
@@ -147,18 +124,16 @@ void* Zone::_alloc(size_t size, size_t alignment) noexcept {
if (ASMJIT_UNLIKELY(newSize > SIZE_MAX - kBlockSize - blockAlignmentOverhead))
return nullptr;
// Allocate new block - we add alignment overhead to `newSize`, which becomes the
// new block size, and we also add `kBlockOverhead` to the allocator as it includes
// members of `Zone::Block` structure.
// Allocate new block - we add alignment overhead to `newSize`, which becomes the new block size, and we also add
// `kBlockOverhead` to the allocator as it includes members of `Zone::Block` structure.
newSize += blockAlignmentOverhead;
Block* newBlock = static_cast<Block*>(::malloc(newSize + kBlockSize));
if (ASMJIT_UNLIKELY(!newBlock))
return nullptr;
// Align the pointer to `minimumAlignment` and adjust the size of this block
// accordingly. It's the same as using `minimumAlignment - Support::alignUpDiff()`,
// just written differently.
// Align the pointer to `minimumAlignment` and adjust the size of this block accordingly. It's the same as using
// `minimumAlignment - Support::alignUpDiff()`, just written differently.
{
newBlock->prev = nullptr;
newBlock->next = nullptr;
@@ -168,9 +143,8 @@ void* Zone::_alloc(size_t size, size_t alignment) noexcept {
newBlock->prev = curBlock;
curBlock->next = newBlock;
// Does only happen if there is a next block, but the requested memory
// can't fit into it. In this case a new buffer is allocated and inserted
// between the current block and the next one.
// This only happens when there is a next block, but the requested memory can't fit into it. In this case a new
// buffer is allocated and inserted between the current block and the next one.
if (next) {
newBlock->next = next;
next->prev = newBlock;
@@ -226,9 +200,8 @@ char* Zone::sformat(const char* fmt, ...) noexcept {
return static_cast<char*>(dup(buf, size));
}
// ============================================================================
// [asmjit::ZoneAllocator - Helpers]
// ============================================================================
// ZoneAllocator - Utilities
// =========================
#if defined(ASMJIT_BUILD_DEBUG)
static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::DynamicBlock* block) noexcept {
@@ -242,9 +215,8 @@ static bool ZoneAllocator_hasDynamicBlock(ZoneAllocator* self, ZoneAllocator::Dy
}
#endif
// ============================================================================
// [asmjit::ZoneAllocator - Init / Reset]
// ============================================================================
// ZoneAllocator - Init & Reset
// ============================
void ZoneAllocator::reset(Zone* zone) noexcept {
// Free dynamic blocks.
@@ -260,9 +232,8 @@ void ZoneAllocator::reset(Zone* zone) noexcept {
_zone = zone;
}
// ============================================================================
// [asmjit::ZoneAllocator - Alloc / Release]
// ============================================================================
// asmjit::ZoneAllocator - Alloc & Release
// =======================================
void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONE_H_INCLUDED
#define ASMJIT_CORE_ZONE_H_INCLUDED
@@ -31,20 +13,14 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::Zone]
// ============================================================================
//! Zone memory.
//!
//! Zone is an incremental memory allocator that allocates memory by simply
//! incrementing a pointer. It allocates blocks of memory by using C's `malloc()`,
//! but divides these blocks into smaller segments requested by calling
//! Zone is an incremental memory allocator that allocates memory by simply incrementing a pointer. It allocates
//! blocks of memory by using C's `malloc()`, but divides these blocks into smaller segments requested by calling
//! `Zone::alloc()` and friends.
//!
//! Zone has no function to release the allocated memory. It has to be released
//! all at once by calling `reset()`. If you need a more friendly allocator that
//! also supports `release()`, consider using `Zone` with `ZoneAllocator`.
//! Zone has no function to release the allocated memory. It has to be released all at once by calling `reset()`.
//! If you need a more friendly allocator that also supports `release()`, consider using `Zone` with `ZoneAllocator`.
class Zone {
public:
ASMJIT_NONCOPYABLE(Zone)
@@ -103,28 +79,31 @@ public:
//! Creates a new Zone.
//!
//! The `blockSize` parameter describes the default size of the block. If the
//! `size` parameter passed to `alloc()` is greater than the default size
//! `Zone` will allocate and use a larger block, but it will not change the
//! The `blockSize` parameter describes the default size of the block. If the `size` parameter passed to `alloc()`
//! is greater than the default size `Zone` will allocate and use a larger block, but it will not change the
//! default `blockSize`.
//!
//! It's not required, but it's good practice to set `blockSize` to a
//! reasonable value that depends on the usage of `Zone`. Greater block sizes
//! are generally safer and perform better than unreasonably low block sizes.
ASMJIT_INLINE explicit Zone(size_t blockSize, size_t blockAlignment = 1) noexcept {
//! It's not required, but it's good practice to set `blockSize` to a reasonable value that depends on the usage
//! of `Zone`. Greater block sizes are generally safer and perform better than unreasonably low block sizes.
inline explicit Zone(size_t blockSize, size_t blockAlignment = 1) noexcept {
_init(blockSize, blockAlignment, nullptr);
}
ASMJIT_INLINE Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary& temporary) noexcept {
//! Creates a new Zone with a first block pointing to a `temporary` memory.
inline Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary& temporary) noexcept {
_init(blockSize, blockAlignment, &temporary);
}
//! \overload
inline Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
_init(blockSize, blockAlignment, temporary);
}
//! Moves an existing `Zone`.
//!
//! \note You cannot move an existing `ZoneTmp` as it uses embedded storage.
//! Attempting to move `ZoneTmp` would result in assertion failure in debug
//! mode and undefined behavior in release mode.
ASMJIT_INLINE Zone(Zone&& other) noexcept
//! \note You cannot move an existing `ZoneTmp` as it uses embedded storage. Attempting to move `ZoneTmp` would
//! result in assertion failure in debug mode and undefined behavior in release mode.
inline Zone(Zone&& other) noexcept
: _ptr(other._ptr),
_end(other._end),
_block(other._block),
@@ -137,16 +116,16 @@ public:
//! Destroys the `Zone` instance.
//!
//! This will destroy the `Zone` instance and release all blocks of memory
//! allocated by it. It performs implicit `reset(Globals::kResetHard)`.
ASMJIT_INLINE ~Zone() noexcept { reset(Globals::kResetHard); }
//! This will destroy the `Zone` instance and release all blocks of memory allocated by it. It performs implicit
//! `reset(ResetPolicy::kHard)`.
inline ~Zone() noexcept { reset(ResetPolicy::kHard); }
ASMJIT_API void _init(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept;
//! Resets the `Zone` invalidating all blocks allocated.
//!
//! See `Globals::ResetPolicy` for more details.
ASMJIT_API void reset(uint32_t resetPolicy = Globals::kResetSoft) noexcept;
ASMJIT_API void reset(ResetPolicy resetPolicy = ResetPolicy::kSoft) noexcept;
//! \}
@@ -154,29 +133,28 @@ public:
//! \{
//! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
ASMJIT_INLINE bool isTemporary() const noexcept { return _isTemporary != 0; }
inline bool isTemporary() const noexcept { return _isTemporary != 0; }
//! Returns the default block size.
ASMJIT_INLINE size_t blockSize() const noexcept { return _blockSize; }
inline size_t blockSize() const noexcept { return _blockSize; }
//! Returns the default block alignment.
ASMJIT_INLINE size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
inline size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
//! Returns remaining size of the current block.
ASMJIT_INLINE size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
inline size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
//! Returns the current zone cursor (dangerous).
//!
//! This is a function that can be used to get exclusive access to the current
//! block's memory buffer.
//! This is a function that can be used to get exclusive access to the current block's memory buffer.
template<typename T = uint8_t>
ASMJIT_INLINE T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
inline T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
//! Returns the end of the current zone block, only useful if you use `ptr()`.
template<typename T = uint8_t>
ASMJIT_INLINE T* end() noexcept { return reinterpret_cast<T*>(_end); }
inline T* end() noexcept { return reinterpret_cast<T*>(_end); }
//! Sets the current zone pointer to `ptr` (must be within the current block).
template<typename T>
ASMJIT_INLINE void setPtr(T* ptr) noexcept {
inline void setPtr(T* ptr) noexcept {
uint8_t* p = reinterpret_cast<uint8_t*>(ptr);
ASMJIT_ASSERT(p >= _ptr && p <= _end);
_ptr = p;
@@ -184,7 +162,7 @@ public:
//! Sets the end zone pointer to `end` (must be within the current block).
template<typename T>
ASMJIT_INLINE void setEnd(T* end) noexcept {
inline void setEnd(T* end) noexcept {
uint8_t* p = reinterpret_cast<uint8_t*>(end);
ASMJIT_ASSERT(p >= _ptr && p <= _end);
_end = p;
@@ -195,7 +173,7 @@ public:
//! \name Utilities
//! \{
ASMJIT_INLINE void swap(Zone& other) noexcept {
inline void swap(Zone& other) noexcept {
// This could lead to a disaster.
ASMJIT_ASSERT(!this->isTemporary());
ASMJIT_ASSERT(!other.isTemporary());
@@ -207,30 +185,29 @@ public:
}
//! Aligns the current pointer to `alignment`.
ASMJIT_INLINE void align(size_t alignment) noexcept {
inline void align(size_t alignment) noexcept {
_ptr = Support::min(Support::alignUp(_ptr, alignment), _end);
}
//! Ensures the remaining size is at least equal or greater than `size`.
//!
//! \note This function doesn't respect any alignment. If you need to ensure
//! there is enough room for an aligned allocation you need to call `align()`
//! before calling `ensure()`.
ASMJIT_INLINE Error ensure(size_t size) noexcept {
//! \note This function doesn't respect any alignment. If you need to ensure there is enough room for an aligned
//! allocation you need to call `align()` before calling `ensure()`.
inline Error ensure(size_t size) noexcept {
if (size <= remainingSize())
return kErrorOk;
else
return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory);
}
ASMJIT_INLINE void _assignBlock(Block* block) noexcept {
inline void _assignBlock(Block* block) noexcept {
size_t alignment = blockAlignment();
_ptr = Support::alignUp(block->data(), alignment);
_end = Support::alignDown(block->data() + block->size, alignment);
_block = block;
}
ASMJIT_INLINE void _assignZeroBlock() noexcept {
inline void _assignZeroBlock() noexcept {
Block* block = const_cast<Block*>(&_zeroBlock);
_ptr = block->data();
_end = block->data();
@@ -244,9 +221,8 @@ public:
//! Allocates the requested memory specified by `size`.
//!
//! Pointer returned is valid until the `Zone` instance is destroyed or reset
//! by calling `reset()`. If you plan to make an instance of C++ from the
//! given pointer use placement `new` and `delete` operators:
//! The returned pointer is valid until the `Zone` instance is destroyed or reset by calling `reset()`. If you plan
//! to construct a C++ object at the returned pointer, use placement `new` and `delete` operators:
//!
//! ```
//! using namespace asmjit;
@@ -274,7 +250,7 @@ public:
//! // Reset or destroy `Zone`.
//! zone.reset();
//! ```
ASMJIT_INLINE void* alloc(size_t size) noexcept {
inline void* alloc(size_t size) noexcept {
if (ASMJIT_UNLIKELY(size > remainingSize()))
return _alloc(size, 1);
@@ -284,7 +260,7 @@ public:
}
//! Allocates the requested memory specified by `size` and `alignment`.
ASMJIT_INLINE void* alloc(size_t size, size_t alignment) noexcept {
inline void* alloc(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment));
uint8_t* ptr = Support::alignUp(_ptr, alignment);
@@ -298,7 +274,7 @@ public:
//! Allocates the requested memory specified by `size` without doing any checks.
//!
//! Can only be called if `remainingSize()` returns size at least equal to `size`.
ASMJIT_INLINE void* allocNoCheck(size_t size) noexcept {
inline void* allocNoCheck(size_t size) noexcept {
ASMJIT_ASSERT(remainingSize() >= size);
uint8_t* ptr = _ptr;
@@ -309,7 +285,7 @@ public:
//! Allocates the requested memory specified by `size` and `alignment` without doing any checks.
//!
//! Performs the same operation as `Zone::allocNoCheck(size)` with `alignment` applied.
ASMJIT_INLINE void* allocNoCheck(size_t size, size_t alignment) noexcept {
inline void* allocNoCheck(size_t size, size_t alignment) noexcept {
ASMJIT_ASSERT(Support::isPowerOf2(alignment));
uint8_t* ptr = Support::alignUp(_ptr, alignment);
@@ -324,25 +300,25 @@ public:
//! Like `alloc()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
inline T* allocT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(alloc(size, alignment));
}
//! Like `allocNoCheck()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
inline T* allocNoCheckT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocNoCheck(size, alignment));
}
//! Like `allocZeroed()`, but the return pointer is casted to `T*`.
template<typename T>
ASMJIT_INLINE T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
inline T* allocZeroedT(size_t size = sizeof(T), size_t alignment = alignof(T)) noexcept {
return static_cast<T*>(allocZeroed(size, alignment));
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T>
ASMJIT_INLINE T* newT() noexcept {
inline T* newT() noexcept {
void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p))
return nullptr;
@@ -351,7 +327,7 @@ public:
//! Like `new(std::nothrow) T(...)`, but allocated by `Zone`.
template<typename T, typename... Args>
ASMJIT_INLINE T* newT(Args&&... args) noexcept {
inline T* newT(Args&&... args) noexcept {
void* p = alloc(sizeof(T), alignof(T));
if (ASMJIT_UNLIKELY(!p))
return nullptr;
@@ -368,7 +344,7 @@ public:
ASMJIT_API void* dup(const void* data, size_t size, bool nullTerminate = false) noexcept;
//! Helper to duplicate data.
ASMJIT_INLINE void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept {
inline void* dupAligned(const void* data, size_t size, size_t alignment, bool nullTerminate = false) noexcept {
align(alignment);
return dup(data, size, nullTerminate);
}
@@ -379,15 +355,10 @@ public:
//! \}
};
// ============================================================================
// [b2d::ZoneTmp]
// ============================================================================
//! \ref Zone with `N` bytes of a static storage, used for the initial block.
//!
//! Temporary zones are used in cases where it's known that some memory will be
//! required, but in many cases it won't exceed N bytes, so the whole operation
//! can be performed without a dynamic memory allocation.
//! Temporary zones are used in cases where it's known that some memory will be required, but in many cases it won't
//! exceed N bytes, so the whole operation can be performed without a dynamic memory allocation.
template<size_t N>
class ZoneTmp : public Zone {
public:
@@ -399,35 +370,29 @@ public:
} _storage;
//! Creates a temporary zone. Dynamic block size is specified by `blockSize`.
ASMJIT_INLINE explicit ZoneTmp(size_t blockSize, size_t blockAlignment = 1) noexcept
inline explicit ZoneTmp(size_t blockSize, size_t blockAlignment = 1) noexcept
: Zone(blockSize, blockAlignment, Support::Temporary(_storage.data, N)) {}
};
// ============================================================================
// [asmjit::ZoneAllocator]
// ============================================================================
//! Zone-based memory allocator that uses an existing `Zone` and provides a
//! `release()` functionality on top of it. It uses `Zone` only for chunks
//! that can be pooled, and uses libc `malloc()` for chunks that are large.
//! Zone-based memory allocator that uses an existing `Zone` and provides a `release()` functionality on top of it.
//! It uses `Zone` only for chunks that can be pooled, and uses libc `malloc()` for chunks that are large.
//!
//! The advantage of ZoneAllocator is that it can allocate small chunks of memory
//! really fast, and these chunks, when released, will be reused by consecutive
//! calls to `alloc()`. Also, since ZoneAllocator uses `Zone`, you can turn any
//! `Zone` into a `ZoneAllocator`, and use it in your `Pass` when necessary.
//! The advantage of ZoneAllocator is that it can allocate small chunks of memory really fast, and these chunks,
//! when released, will be reused by consecutive calls to `alloc()`. Also, since ZoneAllocator uses `Zone`, you can
//! turn any `Zone` into a `ZoneAllocator`, and use it in your `Pass` when necessary.
//!
//! ZoneAllocator is used by AsmJit containers to make containers having only
//! few elements fast (and lightweight) and to allow them to grow and use
//! dynamic blocks when require more storage.
//! ZoneAllocator is used by AsmJit containers to make containers that hold only a few elements fast (and lightweight)
//! and to allow them to grow and use dynamic blocks when they require more storage.
class ZoneAllocator {
public:
ASMJIT_NONCOPYABLE(ZoneAllocator)
//! \cond INTERNAL
enum {
// In short, we pool chunks of these sizes:
// [32, 64, 96, 128, 192, 256, 320, 384, 448, 512]
// In short, we pool chunks of these sizes:
// [32, 64, 96, 128, 192, 256, 320, 384, 448, 512]
enum : uint32_t {
//! How many bytes per a low granularity pool (has to be at least 16).
kLoGranularity = 32,
//! Number of slots of a low granularity pool.
@@ -452,9 +417,8 @@ public:
Slot* next;
};
//! A block of memory that has been allocated dynamically and is not part of
//! block-list used by the allocator. This is used to keep track of all these
//! blocks so they can be freed by `reset()` if not freed explicitly.
//! A block of memory that has been allocated dynamically and is not part of block-list used by the allocator.
//! This is used to keep track of all these blocks so they can be freed by `reset()` if not freed explicitly.
struct DynamicBlock {
DynamicBlock* prev;
DynamicBlock* next;
@@ -462,6 +426,9 @@ public:
//! \endcond
//! \name Members
//! \{
//! Zone used to allocate memory that fits into slots.
Zone* _zone;
//! Indexed slots containing released memory.
@@ -469,6 +436,8 @@ public:
//! Dynamic blocks for larger allocations (no slots).
DynamicBlock* _dynamicBlocks;
//! \}
//! \name Construction & Destruction
//! \{
@@ -496,9 +465,9 @@ public:
//! It's the same as calling `reset(zone)`.
inline void init(Zone* zone) noexcept { reset(zone); }
//! Resets this `ZoneAllocator` and also forget about the current `Zone` which
//! is attached (if any). Reset optionally attaches a new `zone` passed, or
//! keeps the `ZoneAllocator` in an uninitialized state, if `zone` is null.
//! Resets this `ZoneAllocator` and also forgets about the current `Zone` which is attached (if any). Reset
//! optionally attaches a new `zone` passed, or keeps the `ZoneAllocator` in an uninitialized state, if
//! `zone` is null.
ASMJIT_API void reset(Zone* zone = nullptr) noexcept;
//! \}
@@ -506,8 +475,7 @@ public:
//! \name Accessors
//! \{
//! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator`
//! is not initialized.
//! Returns the assigned `Zone` of this allocator or null if this `ZoneAllocator` is not initialized.
inline Zone* zone() const noexcept { return _zone; }
//! \}
@@ -516,10 +484,10 @@ public:
//! \name Internals
//! \{
//! Returns the slot index to be used for `size`. Returns `true` if a valid slot
//! has been written to `slot` and `allocatedSize` has been filled with slot
//! exact size (`allocatedSize` can be equal or slightly greater than `size`).
static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot) noexcept {
//! Returns the slot index to be used for `size`. Returns `true` if a valid slot has been written to `slot` and
//! `allocatedSize` has been filled with the slot's exact size (`allocatedSize` can be equal to or slightly greater than
//! `size`).
static inline bool _getSlotIndex(size_t size, uint32_t& slot) noexcept {
ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize)
return false;
@@ -533,7 +501,7 @@ public:
}
//! \overload
static ASMJIT_INLINE bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept {
static inline bool _getSlotIndex(size_t size, uint32_t& slot, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(size > 0);
if (size > kHiMaxSize)
return false;
@@ -571,9 +539,8 @@ public:
return _alloc(size, allocatedSize);
}
//! Like `alloc(size)`, but provides a second argument `allocatedSize` that
//! provides a way to know how big the block returned actually is. This is
//! useful for containers to prevent growing too early.
//! Like `alloc(size)`, but provides a second argument `allocatedSize` that provides a way to know how big
//! the block returned actually is. This is useful for containers to prevent growing too early.
inline void* alloc(size_t size, size_t& allocatedSize) noexcept {
ASMJIT_ASSERT(isInitialized());
return _alloc(size, allocatedSize);
@@ -621,9 +588,8 @@ public:
return new(p) T(std::forward<Args>(args)...);
}
//! Releases the memory previously allocated by `alloc()`. The `size` argument
//! has to be the same as used to call `alloc()` or `allocatedSize` returned
//! by `alloc()`.
//! Releases the memory previously allocated by `alloc()`. The `size` argument has to be the same as used to call
//! `alloc()` or `allocatedSize` returned by `alloc()`.
inline void release(void* p, size_t size) noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(p != nullptr);
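
Putting the refactored `Zone` interface together — plain `inline` methods, `ResetPolicy` instead of `Globals::kReset*`, and `ZoneAllocator::init()`/`release()` — here is a hedged usage sketch that sticks to the methods visible in this header; `Payload` is a made-up type used only for illustration and error handling is kept minimal:

```cpp
#include <asmjit/core.h>
using namespace asmjit;

struct Payload {                       // Hypothetical user type, for illustration only.
  int id;
  explicit Payload(int id) noexcept : id(id) {}
};

void zoneUsageSketch() {
  // A Zone with a 4kB default block size; larger requests allocate larger blocks.
  Zone zone(4096);

  void* raw = zone.alloc(64, 8);       // 64 bytes aligned to 8.
  Payload* p = zone.newT<Payload>(42); // Constructed in zone memory, or nullptr on OOM.
  (void)raw;
  (void)p;

  // ZoneAllocator adds pooling and release() on top of an existing Zone.
  ZoneAllocator allocator;
  allocator.init(&zone);

  size_t allocatedSize;
  void* chunk = allocator.alloc(100, allocatedSize);
  if (chunk)
    allocator.release(chunk, allocatedSize);

  // ZoneTmp keeps its first block in embedded storage - small uses avoid malloc().
  ZoneTmp<512> tmp(256);
  (void)tmp.alloc(32);

  // Detach the allocator, then release everything held by the Zone at once.
  allocator.reset();
  zone.reset(ResetPolicy::kHard);
}
```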

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
@@ -28,9 +10,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneHashBase - Helpers]
// ============================================================================
// ZoneHashBase - Prime Numbers
// ============================
#define ASMJIT_POPULATE_PRIMES(ENTRY) \
ENTRY(2 , 0x80000000, 32), /* [N * 0x80000000 >> 32] (rcp=2147483648) */ \
@@ -183,9 +164,8 @@ static const uint8_t ZoneHash_primeShift[] = {
#undef E
};
// ============================================================================
// [asmjit::ZoneHashBase - Rehash]
// ============================================================================
// ZoneHashBase - Rehash
// =====================
void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexcept {
ASMJIT_ASSERT(primeIndex < ASMJIT_ARRAY_SIZE(ZoneHash_primeArray));
@@ -225,9 +205,8 @@ void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexce
allocator->release(oldData, oldCount * sizeof(ZoneHashNode*));
}
// ============================================================================
// [asmjit::ZoneHashBase - Ops]
// ============================================================================
// ZoneHashBase - Operations
// =========================
ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
uint32_t hashMod = _calcMod(node->_hashCode);
@@ -266,9 +245,8 @@ ZoneHashNode* ZoneHashBase::_remove(ZoneAllocator* allocator, ZoneHashNode* node
return nullptr;
}
// ============================================================================
// [asmjit::ZoneHash - Unit]
// ============================================================================
// ZoneHashBase - Tests
// ====================
#if defined(ASMJIT_TEST)
struct MyHashNode : public ZoneHashNode {

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONEHASH_H_INCLUDED
#define ASMJIT_CORE_ZONEHASH_H_INCLUDED
@@ -31,14 +13,9 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneHashNode]
// ============================================================================
//! Node used by \ref ZoneHash template.
//!
//! You must provide function `bool eq(const Key& key)` in order to make
//! `ZoneHash::get()` working.
//! You must provide function `bool eq(const Key& key)` in order to make `ZoneHash::get()` working.
class ZoneHashNode {
public:
ASMJIT_NONCOPYABLE(ZoneHashNode)
@@ -56,10 +33,6 @@ public:
uint32_t _customData;
};
// ============================================================================
// [asmjit::ZoneHashBase]
// ============================================================================
//! Base class used by \ref ZoneHash template
class ZoneHashBase {
public:
@@ -162,16 +135,11 @@ public:
//! \}
};
// ============================================================================
// [asmjit::ZoneHash]
// ============================================================================
//! Low-level hash table specialized for storing string keys and POD values.
//!
//! This hash table allows duplicates to be inserted (the API is so low
//! level that it's up to you if you allow it or not, as you should first
//! `get()` the node and then modify it or insert a new node by using `insert()`,
//! depending on the intention).
//! This hash table allows duplicates to be inserted (the API is so low level that it's up to you if you allow it or
//! not, as you should first `get()` the node and then modify it or insert a new node by using `insert()`, depending
//! on the intention).
template<typename NodeT>
class ZoneHash : public ZoneHashBase {
public:

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/zone.h"
@@ -27,9 +9,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneList - Unit]
// ============================================================================
// ZoneList - Tests
// ================
#if defined(ASMJIT_TEST)
class MyListNode : public ZoneListNode<MyListNode> {};

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONELIST_H_INCLUDED
#define ASMJIT_CORE_ZONELIST_H_INCLUDED
@@ -31,17 +13,28 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneListNode]
// ============================================================================
//! Node used by \ref ZoneList template.
template<typename NodeT>
class ZoneListNode {
public:
ASMJIT_NONCOPYABLE(ZoneListNode)
NodeT* _listNodes[Globals::kLinkCount];
//! \name Constants
//! \{
enum : size_t {
kNodeIndexPrev = 0,
kNodeIndexNext = 1
};
//! \}
//! \name Members
//! \{
NodeT* _listNodes[2];
//! \}
//! \name Construction & Destruction
//! \{
@@ -57,26 +50,37 @@ public:
//! \name Accessors
//! \{
inline bool hasPrev() const noexcept { return _listNodes[Globals::kLinkPrev] != nullptr; }
inline bool hasNext() const noexcept { return _listNodes[Globals::kLinkNext] != nullptr; }
inline bool hasPrev() const noexcept { return _listNodes[kNodeIndexPrev] != nullptr; }
inline bool hasNext() const noexcept { return _listNodes[kNodeIndexNext] != nullptr; }
inline NodeT* prev() const noexcept { return _listNodes[Globals::kLinkPrev]; }
inline NodeT* next() const noexcept { return _listNodes[Globals::kLinkNext]; }
inline NodeT* prev() const noexcept { return _listNodes[kNodeIndexPrev]; }
inline NodeT* next() const noexcept { return _listNodes[kNodeIndexNext]; }
//! \}
};
// ============================================================================
// [asmjit::ZoneList<T>]
// ============================================================================
//! Zone allocated list container that uses nodes of `NodeT` type.
template <typename NodeT>
class ZoneList {
public:
ASMJIT_NONCOPYABLE(ZoneList)
NodeT* _nodes[Globals::kLinkCount];
//! \name Constants
//! \{
enum : size_t {
kNodeIndexFirst = 0,
kNodeIndexLast = 1
};
//! \}
//! \name Members
//! \{
NodeT* _nodes[2];
//! \}
//! \name Construction & Destruction
//! \{
@@ -98,8 +102,8 @@ public:
//! \{
inline bool empty() const noexcept { return _nodes[0] == nullptr; }
inline NodeT* first() const noexcept { return _nodes[Globals::kLinkFirst]; }
inline NodeT* last() const noexcept { return _nodes[Globals::kLinkLast]; }
inline NodeT* first() const noexcept { return _nodes[kNodeIndexFirst]; }
inline NodeT* last() const noexcept { return _nodes[kNodeIndexLast]; }
//! \}
@@ -140,11 +144,11 @@ public:
node->_listNodes[ dir] = next;
}
inline void append(NodeT* node) noexcept { _addNode(node, Globals::kLinkLast); }
inline void prepend(NodeT* node) noexcept { _addNode(node, Globals::kLinkFirst); }
inline void append(NodeT* node) noexcept { _addNode(node, kNodeIndexLast); }
inline void prepend(NodeT* node) noexcept { _addNode(node, kNodeIndexFirst); }
inline void insertAfter(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkNext); }
inline void insertBefore(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, Globals::kLinkPrev); }
inline void insertAfter(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, NodeT::kNodeIndexNext); }
inline void insertBefore(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, NodeT::kNodeIndexPrev); }
inline NodeT* unlink(NodeT* node) noexcept {
NodeT* prev = node->prev();
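
With the `Globals::kLink*` indexes replaced by per-class `kNodeIndex*` constants, the public `ZoneList` interface itself is unchanged. A short sketch of the usual wiring — a node type deriving from `ZoneListNode<Self>` (the `value` payload is made up), with node memory coming from a `Zone` as in the unit test:

```cpp
#include <asmjit/core.h>
using namespace asmjit;

class MyListNode : public ZoneListNode<MyListNode> {
public:
  inline explicit MyListNode(int value) noexcept : value(value) {}
  int value;                           // Illustrative payload.
};

void listSketch(Zone& zone) {
  ZoneList<MyListNode> list;

  MyListNode* a = zone.newT<MyListNode>(1);
  MyListNode* b = zone.newT<MyListNode>(2);
  MyListNode* c = zone.newT<MyListNode>(3);
  if (!a || !b || !c)
    return;                            // Zone allocation can fail.

  list.append(a);                      // [a]
  list.prepend(b);                     // [b, a]
  list.insertAfter(b, c);              // [b, c, a]

  MyListNode* first = list.first();    // == b
  list.unlink(first);                  // [c, a]; the node itself stays in the Zone.
}
```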

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/zone.h"
@@ -27,15 +9,14 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneStackBase - Init / Reset]
// ============================================================================
// ZoneStackBase - Init & Reset
// ============================
Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcept {
ZoneAllocator* oldAllocator = _allocator;
if (oldAllocator) {
Block* block = _block[Globals::kLinkFirst];
Block* block = _block[kBlockIndexFirst];
while (block) {
Block* next = block->next();
oldAllocator->release(block, kBlockSize);
@@ -43,8 +24,8 @@ Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcep
}
_allocator = nullptr;
_block[Globals::kLinkLeft] = nullptr;
_block[Globals::kLinkRight] = nullptr;
_block[kBlockIndexFirst] = nullptr;
_block[kBlockIndexLast] = nullptr;
}
if (allocator) {
@@ -52,22 +33,21 @@ Error ZoneStackBase::_init(ZoneAllocator* allocator, size_t middleIndex) noexcep
if (ASMJIT_UNLIKELY(!block))
return DebugUtils::errored(kErrorOutOfMemory);
block->_link[Globals::kLinkLeft] = nullptr;
block->_link[Globals::kLinkRight] = nullptr;
block->_link[kBlockIndexPrev] = nullptr;
block->_link[kBlockIndexNext] = nullptr;
block->_start = (uint8_t*)block + middleIndex;
block->_end = (uint8_t*)block + middleIndex;
_allocator = allocator;
_block[Globals::kLinkLeft] = block;
_block[Globals::kLinkRight] = block;
_block[kBlockIndexFirst] = block;
_block[kBlockIndexLast] = block;
}
return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneStackBase - Ops]
// ============================================================================
// ZoneStackBase - Operations
// ==========================
Error ZoneStackBase::_prepareBlock(uint32_t side, size_t initialIndex) noexcept {
ASMJIT_ASSERT(isInitialized());
@@ -109,9 +89,8 @@ void ZoneStackBase::_cleanupBlock(uint32_t side, size_t middleIndex) noexcept {
}
}
// ============================================================================
// [asmjit::ZoneStack - Unit]
// ============================================================================
// ZoneStack - Tests
// =================
#if defined(ASMJIT_TEST)
template<typename T>

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONESTACK_H_INCLUDED
#define ASMJIT_CORE_ZONESTACK_H_INCLUDED
@@ -31,24 +13,43 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneStackBase]
// ============================================================================
//! Base class used by \ref ZoneStack.
class ZoneStackBase {
public:
ASMJIT_NONCOPYABLE(ZoneStackBase)
static constexpr uint32_t kBlockSize = ZoneAllocator::kHiMaxSize;
//! \name Constants
//! \{
enum : size_t {
kBlockIndexPrev = 0,
kBlockIndexNext = 1,
kBlockIndexFirst = 0,
kBlockIndexLast = 1,
kBlockSize = ZoneAllocator::kHiMaxSize
};
//! \}
//! \name Types
//! \{
struct Block {
inline bool empty() const noexcept { return _start == _end; }
inline Block* prev() const noexcept { return _link[Globals::kLinkLeft]; }
inline Block* next() const noexcept { return _link[Globals::kLinkRight]; }
//! Next and previous blocks.
Block* _link[2];
//! Pointer to the start of the array.
void* _start;
//! Pointer to the end of the array.
void* _end;
inline void setPrev(Block* block) noexcept { _link[Globals::kLinkLeft] = block; }
inline void setNext(Block* block) noexcept { _link[Globals::kLinkRight] = block; }
inline bool empty() const noexcept { return _start == _end; }
inline Block* prev() const noexcept { return _link[kBlockIndexPrev]; }
inline Block* next() const noexcept { return _link[kBlockIndexNext]; }
inline void setPrev(Block* block) noexcept { _link[kBlockIndexPrev] = block; }
inline void setNext(Block* block) noexcept { _link[kBlockIndexNext] = block; }
template<typename T>
inline T* start() const noexcept { return static_cast<T*>(_start); }
@@ -74,18 +75,21 @@ public:
return (uintptr_t)_end <= ((uintptr_t)this + kEndBlockIndex - sizeof(T));
}
Block* _link[Globals::kLinkCount]; //!< Next and previous blocks.
void* _start; //!< Pointer to the start of the array.
void* _end; //!< Pointer to the end of the array.
};
//! \}
//! \name Members
//! \{
//! Allocator used to allocate data.
ZoneAllocator* _allocator;
//! First and last blocks.
Block* _block[Globals::kLinkCount];
Block* _block[2];
//! \name Construction / Destruction
//! \}
//! \name Construction & Destruction
//! \{
inline ZoneStackBase() noexcept {
@@ -125,16 +129,15 @@ public:
//! \endcond
};
// ============================================================================
// [asmjit::ZoneStack<T>]
// ============================================================================
//! Zone allocated stack container.
template<typename T>
class ZoneStack : public ZoneStackBase {
public:
ASMJIT_NONCOPYABLE(ZoneStack)
//! \name Constants
//! \{
enum : uint32_t {
kNumBlockItems = uint32_t((kBlockSize - sizeof(Block)) / sizeof(T)),
kStartBlockIndex = uint32_t(sizeof(Block)),
@@ -142,7 +145,9 @@ public:
kEndBlockIndex = uint32_t(kStartBlockIndex + (kNumBlockItems ) * sizeof(T))
};
//! \name Construction / Destruction
//! \}
//! \name Construction & Destruction
//! \{
inline ZoneStack() noexcept {}
@@ -155,13 +160,13 @@ public:
//! \name Utilities
//! \{
ASMJIT_INLINE Error prepend(T item) noexcept {
inline Error prepend(T item) noexcept {
ASMJIT_ASSERT(isInitialized());
Block* block = _block[Globals::kLinkFirst];
Block* block = _block[kBlockIndexFirst];
if (!block->canPrepend<T>()) {
ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkFirst, kEndBlockIndex));
block = _block[Globals::kLinkFirst];
ASMJIT_PROPAGATE(_prepareBlock(kBlockIndexFirst, kEndBlockIndex));
block = _block[kBlockIndexFirst];
}
T* ptr = block->start<T>() - 1;
@@ -171,13 +176,13 @@ public:
return kErrorOk;
}
ASMJIT_INLINE Error append(T item) noexcept {
inline Error append(T item) noexcept {
ASMJIT_ASSERT(isInitialized());
Block* block = _block[Globals::kLinkLast];
Block* block = _block[kBlockIndexLast];
if (!block->canAppend<T>()) {
ASMJIT_PROPAGATE(_prepareBlock(Globals::kLinkLast, kStartBlockIndex));
block = _block[Globals::kLinkLast];
ASMJIT_PROPAGATE(_prepareBlock(kBlockIndexLast, kStartBlockIndex));
block = _block[kBlockIndexLast];
}
T* ptr = block->end<T>();
@@ -188,11 +193,11 @@ public:
return kErrorOk;
}
ASMJIT_INLINE T popFirst() noexcept {
inline T popFirst() noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty());
Block* block = _block[Globals::kLinkFirst];
Block* block = _block[kBlockIndexFirst];
ASMJIT_ASSERT(!block->empty());
T* ptr = block->start<T>();
@@ -200,16 +205,16 @@ public:
block->setStart(ptr);
if (block->empty())
_cleanupBlock(Globals::kLinkFirst, kMidBlockIndex);
_cleanupBlock(kBlockIndexFirst, kMidBlockIndex);
return item;
}
ASMJIT_INLINE T pop() noexcept {
inline T pop() noexcept {
ASMJIT_ASSERT(isInitialized());
ASMJIT_ASSERT(!empty());
Block* block = _block[Globals::kLinkLast];
Block* block = _block[kBlockIndexLast];
ASMJIT_ASSERT(!block->empty());
T* ptr = block->end<T>();
@@ -219,7 +224,7 @@ public:
block->setEnd(ptr);
if (block->empty())
_cleanupBlock(Globals::kLinkLast, kMidBlockIndex);
_cleanupBlock(kBlockIndexLast, kMidBlockIndex);
return item;
}
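
`ZoneStack` keeps its items in blocks addressed through the new `kBlockIndexFirst`/`kBlockIndexLast` constants, while the item-level API stays the same. A usage sketch; the `init()` call that attaches a `ZoneAllocator` is assumed to have its usual `Error init(ZoneAllocator*)` shape, which is not visible in these hunks:

```cpp
#include <asmjit/core.h>
using namespace asmjit;

void stackSketch(ZoneAllocator* allocator) {
  ZoneStack<int> stack;

  // Assumed initializer - binds the stack to `allocator` (not shown in the diff above).
  if (stack.init(allocator) != kErrorOk)
    return;

  // prepend()/append() return Error; checks are omitted here for brevity.
  stack.append(1);
  stack.append(2);
  stack.prepend(0);                    // Stack now holds: 0, 1, 2.

  int front = stack.popFirst();        // 0
  int back  = stack.pop();             // 2
  (void)front;
  (void)back;
}
```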

View File

@@ -1,28 +1,10 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_SMALLSTRING_H_INCLUDED
#define ASMJIT_CORE_SMALLSTRING_H_INCLUDED
#ifndef ASMJIT_CORE_ZONESTRING_H_INCLUDED
#define ASMJIT_CORE_ZONESTRING_H_INCLUDED
#include "../core/globals.h"
#include "../core/zone.h"
@@ -32,10 +14,6 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneStringBase]
// ============================================================================
//! A helper class used by \ref ZoneString implementation.
struct ZoneStringBase {
union {
@@ -74,28 +52,34 @@ struct ZoneStringBase {
}
};
// ============================================================================
// [asmjit::ZoneString<N>]
// ============================================================================
//! A string template that can be zone allocated.
//!
//! Helps with creating strings that can be either statically allocated if they
//! are small, or externally allocated in case their size exceeds the limit.
//! The `N` represents the size of the whole `ZoneString` structure, based on
//! Helps with creating strings that can be either statically allocated if they are small, or externally allocated
//! in case their size exceeds the limit. The `N` represents the size of the whole `ZoneString` structure, based on
//! that size the maximum size of the internal buffer is determined.
template<size_t N>
class ZoneString {
public:
static constexpr uint32_t kWholeSize =
(N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase));
static constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5;
//! \name Constants
//! \{
enum : uint32_t {
kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase)),
kMaxEmbeddedSize = kWholeSize - 5
};
//! \}
//! \name Members
//! \{
union {
ZoneStringBase _base;
char _wholeData[kWholeSize];
};
//! \}
//! \name Construction & Destruction
//! \{
@@ -120,9 +104,8 @@ public:
//! Copies a new `data` of the given `size` to the string.
//!
//! If the `size` exceeds the internal buffer the given `zone` will be
//! used to duplicate the data, otherwise the internal buffer will be
//! used as a storage.
//! If the `size` exceeds the internal buffer the given `zone` will be used to duplicate the data, otherwise
//! the internal buffer will be used as a storage.
inline Error setData(Zone* zone, const char* data, size_t size) noexcept {
return _base.setData(zone, kMaxEmbeddedSize, data, size);
}
@@ -134,4 +117,4 @@ public:
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_SMALLSTRING_H_INCLUDED
#endif // ASMJIT_CORE_ZONESTRING_H_INCLUDED
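
`ZoneString<N>` stores up to `kMaxEmbeddedSize` characters in its embedded buffer and duplicates longer data into the supplied `Zone`, as the doc comment above states. A small sketch that uses only `setData()` from this header:

```cpp
#include <asmjit/core.h>
#include <cstring>
using namespace asmjit;

void zoneStringSketch(Zone& zone) {
  // N is the size of the whole structure, so the embedded capacity is N - 5 bytes.
  ZoneString<64> name;

  const char* shortText = "entry";     // Fits into the embedded buffer.
  const char* longText  = "a considerably longer label that will not fit inline "
                          "and therefore gets duplicated into the Zone";

  if (name.setData(&zone, shortText, std::strlen(shortText)) != kErrorOk)
    return;

  // Exceeding kMaxEmbeddedSize switches the storage to zone-allocated memory.
  if (name.setData(&zone, longText, std::strlen(longText)) != kErrorOk)
    return;
}
```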

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
@@ -28,9 +10,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneTree - Unit]
// ============================================================================
// ZoneTreeBase - Tests
// ====================
#if defined(ASMJIT_TEST)
template<typename NodeT>

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONETREE_H_INCLUDED
#define ASMJIT_CORE_ZONETREE_H_INCLUDED
@@ -31,10 +13,6 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneTreeNode]
// ============================================================================
//! RB-Tree node.
//!
//! The color is stored in a least significant bit of the `left` node.
@@ -44,12 +22,22 @@ class ZoneTreeNode {
public:
ASMJIT_NONCOPYABLE(ZoneTreeNode)
//! \name Constants
//! \{
enum : uintptr_t {
kRedMask = 0x1,
kPtrMask = ~kRedMask
};
uintptr_t _rbNodeData[Globals::kLinkCount];
//! \}
//! \name Members
//! \{
uintptr_t _rbNodeData[2];
//! \}
//! \name Construction & Destruction
//! \{
@@ -123,10 +111,6 @@ public:
//! \}
};
// ============================================================================
// [asmjit::ZoneTree]
// ============================================================================
//! RB-Tree.
template<typename NodeT>
class ZoneTree {
@@ -164,7 +148,7 @@ public:
std::swap(_root, other._root);
}
template<typename CompareT = Support::Compare<Support::kSortAscending>>
template<typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
void insert(NodeT* node, const CompareT& cmp = CompareT()) noexcept {
// Node to insert must not contain garbage.
ASMJIT_ASSERT(!node->hasLeft());
@@ -176,18 +160,18 @@ public:
return;
}
ZoneTreeNode head; // False root node,
head._setRight(_root); // having root on the right.
ZoneTreeNode head; // False root node,
head._setRight(_root); // having root on the right.
ZoneTreeNode* g = nullptr; // Grandparent.
ZoneTreeNode* p = nullptr; // Parent.
ZoneTreeNode* t = &head; // Iterator.
ZoneTreeNode* q = _root; // Query.
ZoneTreeNode* g = nullptr; // Grandparent.
ZoneTreeNode* p = nullptr; // Parent.
ZoneTreeNode* t = &head; // Iterator.
ZoneTreeNode* q = _root; // Query.
size_t dir = 0; // Direction for accessing child nodes.
size_t last = 0; // Not needed to initialize, but makes some tools happy.
size_t dir = 0; // Direction for accessing child nodes.
size_t last = 0; // Not needed to initialize, but makes some tools happy.
node->_makeRed(); // New nodes are always red and violations fixed appropriately.
node->_makeRed(); // New nodes are always red and violations fixed appropriately.
// Search down the tree.
for (;;) {
@@ -229,7 +213,7 @@ public:
}
//! Remove node from RBTree.
template<typename CompareT = Support::Compare<Support::kSortAscending>>
template<typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
void remove(ZoneTreeNode* node, const CompareT& cmp = CompareT()) noexcept {
ZoneTreeNode head; // False root node,
head._setRight(_root); // having root on the right.
@@ -304,10 +288,9 @@ public:
p->_setChild(p->_getRight() == q,
q->_getChild(q->_getLeft() == nullptr));
// NOTE: The original algorithm used a trick to just copy 'key/value' to
// `f` and mark `q` for deletion. But this is unacceptable here as we
// really want to destroy the passed `node`. So, we have to make sure that
// we have really removed `f` and not `q`.
// NOTE: The original algorithm used a trick to just copy 'key/value' to `f` and mark `q` for deletion. But this
// is unacceptable here as we really want to destroy the passed `node`. So, we have to make sure that we have
// really removed `f` and not `q`.
if (f != q) {
ASMJIT_ASSERT(f != &head);
ASMJIT_ASSERT(f != gf);
@@ -337,8 +320,8 @@ public:
if (_root) _root->_makeBlack();
}
template<typename KeyT, typename CompareT = Support::Compare<Support::kSortAscending>>
ASMJIT_INLINE NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
template<typename KeyT, typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
inline NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
ZoneTreeNode* node = _root;
while (node) {
auto result = cmp(*static_cast<const NodeT*>(node), key);
@@ -359,7 +342,7 @@ public:
static inline bool _isValidRed(ZoneTreeNode* node) noexcept { return ZoneTreeNode::_isValidRed(node); }
//! Single rotation.
static ASMJIT_INLINE ZoneTreeNode* _singleRotate(ZoneTreeNode* root, size_t dir) noexcept {
static inline ZoneTreeNode* _singleRotate(ZoneTreeNode* root, size_t dir) noexcept {
ZoneTreeNode* save = root->_getChild(!dir);
root->_setChild(!dir, save->_getChild(dir));
save->_setChild( dir, root);
@@ -369,7 +352,7 @@ public:
}
//! Double rotation.
static ASMJIT_INLINE ZoneTreeNode* _doubleRotate(ZoneTreeNode* root, size_t dir) noexcept {
static inline ZoneTreeNode* _doubleRotate(ZoneTreeNode* root, size_t dir) noexcept {
root->_setChild(!dir, _singleRotate(root->_getChild(!dir), !dir));
return _singleRotate(root, dir);
}
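
The comparator type moved from `Support::kSortAscending` to the strongly-typed `Support::SortOrder::kAscending`, but `insert()`, `remove()` and `get()` keep their shape. A hedged sketch of a node usable with the default comparator, modeled on the pattern AsmJit's unit tests use; the exact set of comparison operators (node-to-node and node-to-key) is an assumption about what the default `Support::Compare` invokes:

```cpp
#include <asmjit/core.h>
using namespace asmjit;

// A red-black tree node keyed by uint32_t. Deriving from ZoneTreeNode gives the
// tree its two links (with the color bit packed into the left one).
class MyTreeNode : public ZoneTreeNode {
public:
  inline explicit MyTreeNode(uint32_t key) noexcept : key(key) {}

  // Assumed to be what the default Support::Compare<SortOrder::kAscending> calls:
  // node-to-node comparisons for insert()/remove(), node-to-key for get().
  inline bool operator<(const MyTreeNode& other) const noexcept { return key < other.key; }
  inline bool operator>(const MyTreeNode& other) const noexcept { return key > other.key; }
  inline bool operator<(uint32_t queryKey) const noexcept { return key < queryKey; }
  inline bool operator>(uint32_t queryKey) const noexcept { return key > queryKey; }

  uint32_t key;
};

void treeSketch(Zone& zone) {
  ZoneTree<MyTreeNode> tree;

  for (uint32_t i = 0; i < 10; i++) {
    MyTreeNode* node = zone.newT<MyTreeNode>(i);
    if (!node)
      return;                          // Zone allocation can fail.
    tree.insert(node);
  }

  MyTreeNode* found = tree.get(uint32_t(5));
  if (found)
    tree.remove(found);
}
```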

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
@@ -28,9 +10,8 @@
ASMJIT_BEGIN_NAMESPACE
// ============================================================================
// [asmjit::ZoneVectorBase - Helpers]
// ============================================================================
// ZoneVectorBase - Helpers
// ========================
Error ZoneVectorBase::_grow(ZoneAllocator* allocator, uint32_t sizeOfT, uint32_t n) noexcept {
uint32_t threshold = Globals::kGrowThreshold / sizeOfT;
@@ -112,9 +93,8 @@ Error ZoneVectorBase::_resize(ZoneAllocator* allocator, uint32_t sizeOfT, uint32
return kErrorOk;
}
// ============================================================================
// [asmjit::ZoneBitVector - Ops]
// ============================================================================
// ZoneBitVector - Operations
// ==========================
Error ZoneBitVector::copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept {
BitWord* data = _data;
@@ -280,9 +260,8 @@ Error ZoneBitVector::_append(ZoneAllocator* allocator, bool value) noexcept {
return _resize(allocator, newSize, idealCapacity, value);
}
// ============================================================================
// [asmjit::ZoneVector / ZoneBitVector - Unit]
// ============================================================================
// ZoneVector / ZoneBitVector - Tests
// ==================================
#if defined(ASMJIT_TEST)
template<typename T>

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
#define ASMJIT_CORE_ZONEVECTOR_H_INCLUDED
@@ -32,10 +14,6 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \{
// ============================================================================
// [asmjit::ZoneVectorBase]
// ============================================================================
//! Base class used by \ref ZoneVector template.
class ZoneVectorBase {
public:
@@ -129,10 +107,6 @@ public:
//! \}
};
// ============================================================================
// [asmjit::ZoneVector<T>]
// ============================================================================
//! Template used to store and manage array of Zone allocated data.
//!
//! This template has these advantages over other std::vector<>:
@@ -213,10 +187,10 @@ public:
//! \{
//! Swaps this vector with `other`.
inline void swap(ZoneVector<T>& other) noexcept { _swap(other); }
ASMJIT_FORCE_INLINE void swap(ZoneVector<T>& other) noexcept { _swap(other); }
//! Prepends `item` to the vector.
inline Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
ASMJIT_FORCE_INLINE Error prepend(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity))
ASMJIT_PROPAGATE(grow(allocator, 1));
@@ -228,7 +202,7 @@ public:
}
//! Inserts an `item` at the specified `index`.
inline Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept {
ASMJIT_FORCE_INLINE Error insert(ZoneAllocator* allocator, size_t index, const T& item) noexcept {
ASMJIT_ASSERT(index <= _size);
if (ASMJIT_UNLIKELY(_size == _capacity))
@@ -243,7 +217,7 @@ public:
}
//! Appends `item` to the vector.
inline Error append(ZoneAllocator* allocator, const T& item) noexcept {
ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, const T& item) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity))
ASMJIT_PROPAGATE(grow(allocator, 1));
@@ -254,7 +228,7 @@ public:
}
//! Appends `other` vector at the end of this vector.
inline Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
ASMJIT_FORCE_INLINE Error concat(ZoneAllocator* allocator, const ZoneVector<T>& other) noexcept {
uint32_t size = other._size;
if (_capacity - _size < size)
ASMJIT_PROPAGATE(grow(allocator, size));
@@ -269,10 +243,9 @@ public:
//! Prepends `item` to the vector (unsafe case).
//!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns
//! `kErrorOk` then N elements can be added to the vector without checking
//! if there is a place for them. Used mostly internally.
inline void prependUnsafe(const T& item) noexcept {
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_FORCE_INLINE void prependUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
T* data = static_cast<T*>(_data);
@@ -285,10 +258,9 @@ public:
  //! Appends `item` to the vector (unsafe case).
//!
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns
//! `kErrorOk` then N elements can be added to the vector without checking
//! if there is a place for them. Used mostly internally.
inline void appendUnsafe(const T& item) noexcept {
//! Can only be used together with `willGrow()`. If `willGrow(N)` returns `kErrorOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_FORCE_INLINE void appendUnsafe(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
memcpy(static_cast<T*>(_data) + _size, &item, sizeof(T));
@@ -296,7 +268,7 @@ public:
}
//! Inserts an `item` at the specified `index` (unsafe case).
inline void insertUnsafe(size_t index, const T& item) noexcept {
ASMJIT_FORCE_INLINE void insertUnsafe(size_t index, const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
ASMJIT_ASSERT(index <= _size);
@@ -306,7 +278,7 @@ public:
_size++;
}
//! Concatenates all items of `other` at the end of the vector.
inline void concatUnsafe(const ZoneVector<T>& other) noexcept {
ASMJIT_FORCE_INLINE void concatUnsafe(const ZoneVector<T>& other) noexcept {
uint32_t size = other._size;
ASMJIT_ASSERT(_capacity - _size >= size);
@@ -317,7 +289,7 @@ public:
}
//! Returns index of the given `val` or `Globals::kNotFound` if it doesn't exist.
inline uint32_t indexOf(const T& val) const noexcept {
ASMJIT_FORCE_INLINE uint32_t indexOf(const T& val) const noexcept {
const T* data = static_cast<const T*>(_data);
uint32_t size = _size;
@@ -351,7 +323,7 @@ public:
return data()[index];
}
template<typename CompareT = Support::Compare<Support::kSortAscending>>
template<typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
inline void sort(const CompareT& cmp = CompareT()) noexcept {
Support::qSort<T, CompareT>(data(), size(), cmp);
}
@@ -370,18 +342,16 @@ public:
//! Returns a reference to the first element of the vector.
//!
//! \note The vector must have at least one element. Attempting to use
//! `first()` on empty vector will trigger an assertion failure in debug
//! builds.
//! \note The vector must have at least one element. Attempting to use `first()` on empty vector will trigger
//! an assertion failure in debug builds.
inline T& first() noexcept { return operator[](0); }
//! \overload
inline const T& first() const noexcept { return operator[](0); }
//! Returns a reference to the last element of the vector.
//!
//! \note The vector must have at least one element. Attempting to use
//! `last()` on empty vector will trigger an assertion failure in debug
//! builds.
//! \note The vector must have at least one element. Attempting to use `last()` on empty vector will trigger
//! an assertion failure in debug builds.
inline T& last() noexcept { return operator[](_size - 1); }
//! \overload
inline const T& last() const noexcept { return operator[](_size - 1); }
@@ -403,9 +373,8 @@ public:
//! Resizes the vector to hold `n` elements.
//!
//! If `n` is greater than the current size then the additional elements'
//! content will be initialized to zero. If `n` is less than the current
//! size then the vector will be truncated to exactly `n` elements.
//! If `n` is greater than the current size then the additional elements' content will be initialized to zero.
//! If `n` is less than the current size then the vector will be truncated to exactly `n` elements.
inline Error resize(ZoneAllocator* allocator, uint32_t n) noexcept {
return ZoneVectorBase::_resize(allocator, sizeof(T), n);
}
@@ -422,15 +391,24 @@ public:
//! \}
};
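The checked `append()` path and the `willGrow()` + `appendUnsafe()` fast path documented above combine as in the following sketch. This is illustrative only and not part of the commit; it assumes the zone containers are reachable through the public core header and that `willGrow()` takes the allocator plus the number of elements to reserve, as the surrounding code suggests.

#include <asmjit/core.h>
using namespace asmjit;

static Error fillVector(ZoneVector<uint32_t>& v, ZoneAllocator* allocator) noexcept {
  // Checked path: append() grows the vector on demand and can fail with kErrorOutOfMemory.
  ASMJIT_PROPAGATE(v.append(allocator, 42u));

  // Unchecked path: reserve room once, then appendUnsafe() only asserts (in debug
  // builds) that the capacity is sufficient.
  ASMJIT_PROPAGATE(v.willGrow(allocator, 3));
  for (uint32_t i = 0; i < 3; i++)
    v.appendUnsafe(i);

  // indexOf() returns Globals::kNotFound when the value is not present.
  uint32_t idx = v.indexOf(42u);
  (void)idx;
  return kErrorOk;
}

// Caller side - the vector's storage lives in the Zone backing the allocator:
//   Zone zone(4096);
//   ZoneAllocator allocator(&zone);
//   ZoneVector<uint32_t> v;
//   Error err = fillVector(v, &allocator);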
// ============================================================================
// [asmjit::ZoneBitVector]
// ============================================================================
//! Zone-allocated bit vector.
class ZoneBitVector {
public:
typedef Support::BitWord BitWord;
static constexpr uint32_t kBitWordSizeInBits = Support::kBitWordSizeInBits;
ASMJIT_NONCOPYABLE(ZoneBitVector)
//! \name Constants
//! \{
enum : uint32_t {
kBitWordSizeInBits = Support::kBitWordSizeInBits
};
//! \}
//! \name Members
//! \{
//! Bits.
BitWord* _data = nullptr;
@@ -439,7 +417,7 @@ public:
//! Capacity of the bit-vector (in bits).
uint32_t _capacity = 0;
ASMJIT_NONCOPYABLE(ZoneBitVector)
//! \}
//! \cond INTERNAL
//! \name Internal
@@ -548,7 +526,7 @@ public:
Support::bitVectorFlipBit(_data, index);
}
ASMJIT_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
ASMJIT_FORCE_INLINE Error append(ZoneAllocator* allocator, bool value) noexcept {
uint32_t index = _size;
if (ASMJIT_UNLIKELY(index >= _capacity))
return _append(allocator, value);
@@ -567,35 +545,34 @@ public:
ASMJIT_API Error copyFrom(ZoneAllocator* allocator, const ZoneBitVector& other) noexcept;
inline void clearAll() noexcept {
ASMJIT_FORCE_INLINE void clearAll() noexcept {
_zeroBits(_data, _wordsPerBits(_size));
}
inline void fillAll() noexcept {
ASMJIT_FORCE_INLINE void fillAll() noexcept {
_fillBits(_data, _wordsPerBits(_size));
_clearUnusedBits();
}
inline void clearBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_FORCE_INLINE void clearBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count);
Support::bitVectorClear(_data, start, count);
}
inline void fillBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_FORCE_INLINE void fillBits(uint32_t start, uint32_t count) noexcept {
ASMJIT_ASSERT(start <= _size);
ASMJIT_ASSERT(_size - start >= count);
Support::bitVectorFill(_data, start, count);
}
//! Performs a logical bitwise AND between bits specified in this array and bits
//! in `other`. If `other` has less bits than `this` then all remaining bits are
//! set to zero.
//! Performs a logical bitwise AND between bits specified in this array and bits in `other`. If `other` has less
//! bits than `this` then all remaining bits are set to zero.
//!
//! \note The size of the BitVector is unaffected by this operation.
inline void and_(const ZoneBitVector& other) noexcept {
ASMJIT_FORCE_INLINE void and_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
@@ -615,12 +592,11 @@ public:
}
}
//! Performs a logical bitwise AND between bits specified in this array and
//! negated bits in `other`. If `other` has less bits than `this` then all
//! remaining bits are kept intact.
//! Performs a logical bitwise AND between bits specified in this array and negated bits in `other`. If `other`
//! has less bits than `this` then all remaining bits are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
inline void andNot(const ZoneBitVector& other) noexcept {
ASMJIT_FORCE_INLINE void andNot(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
@@ -629,12 +605,11 @@ public:
dst[i] = dst[i] & ~src[i];
}
  //! Performs a logical bitwise OR between bits specified in this array and bits
//! in `other`. If `other` has less bits than `this` then all remaining bits
//! are kept intact.
  //! Performs a logical bitwise OR between bits specified in this array and bits in `other`. If `other` has less
//! bits than `this` then all remaining bits are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
inline void or_(const ZoneBitVector& other) noexcept {
ASMJIT_FORCE_INLINE void or_(const ZoneBitVector& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
@@ -644,15 +619,16 @@ public:
_clearUnusedBits();
}
inline void _clearUnusedBits() noexcept {
ASMJIT_FORCE_INLINE void _clearUnusedBits() noexcept {
uint32_t idx = _size / kBitWordSizeInBits;
uint32_t bit = _size % kBitWordSizeInBits;
if (!bit) return;
if (!bit)
return;
_data[idx] &= (BitWord(1) << bit) - 1u;
}
inline bool eq(const ZoneBitVector& other) const noexcept {
ASMJIT_FORCE_INLINE bool eq(const ZoneBitVector& other) const noexcept {
if (_size != other._size)
return false;
@@ -691,14 +667,14 @@ public:
class ForEachBitSet : public Support::BitVectorIterator<BitWord> {
public:
ASMJIT_INLINE explicit ForEachBitSet(const ZoneBitVector& bitVector) noexcept
inline explicit ForEachBitSet(const ZoneBitVector& bitVector) noexcept
: Support::BitVectorIterator<BitWord>(bitVector.data(), bitVector.sizeInBitWords()) {}
};
template<class Operator>
class ForEachBitOp : public Support::BitVectorOpIterator<BitWord, Operator> {
public:
ASMJIT_INLINE ForEachBitOp(const ZoneBitVector& a, const ZoneBitVector& b) noexcept
inline ForEachBitOp(const ZoneBitVector& a, const ZoneBitVector& b) noexcept
: Support::BitVectorOpIterator<BitWord, Operator>(a.data(), b.data(), a.sizeInBitWords()) {
ASMJIT_ASSERT(a.size() == b.size());
}
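The size-preserving semantics spelled out in the `and_()`, `andNot()`, and `or_()` comments above can be summarized in a short sketch (illustrative only, using the same includes as the ZoneVector sketch earlier and only the members visible in this header):

static Error bitVectorOps(ZoneAllocator* allocator) noexcept {
  ZoneBitVector a, b;

  // Build two bit-vectors of different sizes: `a` has 128 bits, `b` has 64.
  for (uint32_t i = 0; i < 128; i++)
    ASMJIT_PROPAGATE(a.append(allocator, (i & 1) != 0));
  for (uint32_t i = 0; i < 64; i++)
    ASMJIT_PROPAGATE(b.append(allocator, true));

  a.and_(b);    // bits 64..127 of `a` are cleared because `b` is shorter
  a.or_(b);     // sets bits 0..63; bits past b's size are left untouched
  a.andNot(b);  // clears bits 0..63 again; the rest is kept intact

  // None of the operations change the size of `a` - it is still 128 bits.
  return kErrorOk;
}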

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_X86_H_INCLUDED
#define ASMJIT_X86_H_INCLUDED
@@ -40,15 +22,14 @@
//! ### Supported Instructions
//!
//! - Emitters:
//! - \ref x86::EmitterExplicitT - Provides all instructions that use
//! explicit operands, provides also utility functions. The member
//! functions provided are part of all X86 emitters.
//! - \ref x86::EmitterImplicitT - Provides all instructions that use
//! implicit operands, these cannot be used with \ref x86::Compiler.
//! - \ref x86::EmitterExplicitT - Provides all instructions that use explicit operands, provides also utility
//! functions. The member functions provided are part of all X86 emitters.
//! - \ref x86::EmitterImplicitT - Provides all instructions that use implicit operands, these cannot be used
//! with \ref x86::Compiler.
//!
//! - Instruction representation:
//! - \ref x86::Inst::Id - instruction identifiers.
//! - \ref x86::Inst::Options - instruction options.
//! - \ref x86::Inst::Id - Provides instruction identifiers for both X86/X86_64 architectures.
//! - \ref InstOptions - Provides generic and X86/X86_64 specific options.
//!
//! ### Register Operands
//!
@@ -74,34 +55,28 @@
//!
//! ### Memory Operands
//!
//! - \ref x86::Mem - X86/X64 memory operand that provides support for all
//! X86 and X64 addressing features including absolute addresses, index
//! scales, and segment override prefixes.
//!
//! ### Other
//!
//! - \ref x86::Features - X86/X64 CPU features on top of \ref BaseFeatures.
//! - \ref x86::Mem - X86/X64 memory operand that provides support for all X86 and X64 addressing features
//! including absolute addresses, index scales, and segment override prefixes.
//!
//! ### Status and Control Words
//!
//! - \ref asmjit::x86::FpuWord::Status - FPU status word.
//! - \ref asmjit::x86::FpuWord::Control - FPU control word.
//! - \ref x86::FpuStatusWord - FPU status word bits / decomposition.
//! - \ref x86::FpuControlWord - FPU control word bits / decomposition.
//!
//! ### Predicates
//! ### Predicates (immediate values)
//!
//! - \ref x86::Predicate - namespace that provides X86/X64 predicates.
//! - \ref x86::Predicate::Cmp - `CMP[PD|PS|SD|SS]` predicate (SSE+).
//! - \ref x86::Predicate::PCmpStr - `[V]PCMP[I|E]STR[I|M]` predicate (SSE4.1+).
//! - \ref x86::Predicate::Round - `ROUND[PD|PS|SD|SS]` predicate (SSE+).
//! - \ref x86::Predicate::VCmp - `VCMP[PD|PS|SD|SS]` predicate (AVX+).
//! - \ref x86::Predicate::VFixupImm - `VFIXUPIMM[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VFPClass - `VFPCLASS[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VGetMant - `VGETMANT[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VPCmp - `VPCMP[U][B|W|D|Q]` predicate (AVX512+).
//! - \ref x86::Predicate::VPCom - `VPCOM[U][B|W|D|Q]` predicate (XOP).
//! - \ref x86::Predicate::VRange - `VRANGE[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::Predicate::VReduce - `REDUCE[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::TLog - namespace that provides `VPTERNLOG[D|Q]` predicate / operations.
//! - \ref x86::CmpImm - `CMP[PD|PS|SD|SS]` predicate (SSE+).
//! - \ref x86::PCmpStrImm - `[V]PCMP[I|E]STR[I|M]` predicate (SSE4.1+, AVX+).
//! - \ref x86::RoundImm - `[V]ROUND[PD|PS|SD|SS]` predicate (SSE+, AVX+).
//! - \ref x86::VCmpImm - `VCMP[PD|PS|SD|SS]` predicate (AVX+).
//! - \ref x86::VFixupImm - `VFIXUPIMM[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::VFPClassImm - `VFPCLASS[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::VGetMantImm - `VGETMANT[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::VPCmpImm - `VPCMP[U][B|W|D|Q]` predicate (AVX512+).
//! - \ref x86::VPComImm - `VPCOM[U][B|W|D|Q]` predicate (XOP).
//! - \ref x86::VRangeImm - `VRANGE[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::VReduceImm - `REDUCE[PD|PS|SD|SS]` predicate (AVX512+).
//! - \ref x86::TLogImm - `VPTERNLOG[D|Q]` predicate and operations (AVX512+).
#include "core.h"
@@ -110,7 +85,6 @@
#include "x86/x86builder.h"
#include "x86/x86compiler.h"
#include "x86/x86emitter.h"
#include "x86/x86features.h"
#include "x86/x86globals.h"
#include "x86/x86instdb.h"
#include "x86/x86operand.h"
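Putting the documented pieces together, memory operands and the renamed predicate immediates are ordinary operands from an emitter's point of view. The following is a usage sketch only; the assembler is assumed to be attached to a CodeHolder, and `x86::CmpImm::kEQ` and `Mem::setSegment()` are assumed from the surrounding API rather than shown in this diff.

#include <asmjit/x86.h>
using namespace asmjit;

static Error emitExample(x86::Assembler& a) noexcept {
  // Memory operand: [fs: rax + rcx*4 + 32] - base, scaled index, displacement,
  // and a segment override prefix.
  x86::Mem m = x86::ptr(x86::rax, x86::rcx, 2, 32);  // shift of 2 => scale of 4
  m.setSegment(x86::fs);
  ASMJIT_PROPAGATE(a.mov(x86::edx, m));

  // Predicate immediate: ordered-equal compare; the enumerator is assumed to
  // mirror the old Predicate::kCmpEQ value.
  return a.cmpps(x86::xmm0, x86::xmm1, Imm(uint32_t(x86::CmpImm::kEQ)));
}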

View File

@@ -1,25 +1,7 @@
// AsmJit - Machine code generation for C++
// This file is part of AsmJit project <https://asmjit.com>
//
// * Official AsmJit Home Page: https://asmjit.com
// * Official Github Repository: https://github.com/asmjit/asmjit
//
// Copyright (c) 2008-2020 The AsmJit Authors
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
// See asmjit.h or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_X86_X86ARCHTRAITS_P_H_INCLUDED
#define ASMJIT_X86_X86ARCHTRAITS_P_H_INCLUDED
@@ -34,10 +16,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(x86)
//! \addtogroup asmjit_x86
//! \{
// ============================================================================
// [asmjit::x86::x86ArchTraits]
// ============================================================================
//! X86 architecture traits (internal).
static const constexpr ArchTraits x86ArchTraits = {
// SP/FP/LR/PC.
Gp::kIdSp, Gp::kIdBp, 0xFF, 0xFF,
@@ -52,51 +31,53 @@ static const constexpr ArchTraits x86ArchTraits = {
0x7FFFFFFFu, 0x7FFFFFFFu,
// ISA features [Gp, Vec, Other0, Other1].
{ ArchTraits::kIsaFeatureSwap | ArchTraits::kIsaFeaturePushPop, 0, 0, 0 },
{{
InstHints::kRegSwap | InstHints::kPushPop,
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints
}},
// RegInfo.
#define V(index) { x86::RegTraits<index>::kSignature }
{ ASMJIT_LOOKUP_TABLE_32(V, 0) },
// Register signatures.
#define V(index) OperandSignature(x86::RegTraits<RegType(index)>::kSignature)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// RegTypeToTypeId.
#define V(index) x86::RegTraits<index>::kTypeId
{ ASMJIT_LOOKUP_TABLE_32(V, 0) },
#define V(index) TypeId(x86::RegTraits<RegType(index)>::kTypeId)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// TypeIdToRegType.
#define V(index) (index + Type::_kIdBaseStart == Type::kIdI8 ? Reg::kTypeGpbLo : \
index + Type::_kIdBaseStart == Type::kIdU8 ? Reg::kTypeGpbLo : \
index + Type::_kIdBaseStart == Type::kIdI16 ? Reg::kTypeGpw : \
index + Type::_kIdBaseStart == Type::kIdU16 ? Reg::kTypeGpw : \
index + Type::_kIdBaseStart == Type::kIdI32 ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdU32 ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdIntPtr ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdUIntPtr ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdF32 ? Reg::kTypeXmm : \
index + Type::_kIdBaseStart == Type::kIdF64 ? Reg::kTypeXmm : \
index + Type::_kIdBaseStart == Type::kIdMask8 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMask16 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMask32 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMask64 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMmx32 ? Reg::kTypeMm : \
index + Type::_kIdBaseStart == Type::kIdMmx64 ? Reg::kTypeMm : Reg::kTypeNone)
{ ASMJIT_LOOKUP_TABLE_32(V, 0) },
#define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kX86_GpbLo : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kX86_GpbLo : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kX86_Gpw : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kX86_Gpw : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kX86_Xmm : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kX86_Xmm : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask8) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask16) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask32) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask64) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMmx32) ? RegType::kX86_Mm : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMmx64) ? RegType::kX86_Mm : RegType::kNone)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ISAWordNameId::kDB,
ISAWordNameId::kDW,
ISAWordNameId::kDD,
ISAWordNameId::kDQ
ArchTypeNameId::kDB,
ArchTypeNameId::kDW,
ArchTypeNameId::kDD,
ArchTypeNameId::kDQ
}
};
// ============================================================================
// [asmjit::x86::x64ArchTraits]
// ============================================================================
//! X64 architecture traits (internal).
static const constexpr ArchTraits x64ArchTraits = {
// SP/FP/LR/PC.
Gp::kIdSp, Gp::kIdBp, 0xFF, 0xFF,
@@ -111,46 +92,51 @@ static const constexpr ArchTraits x64ArchTraits = {
0x7FFFFFFFu, 0x7FFFFFFFu,
// ISA features [Gp, Vec, Other0, Other1].
{ ArchTraits::kIsaFeatureSwap | ArchTraits::kIsaFeaturePushPop, 0, 0, 0 },
{{
InstHints::kRegSwap | InstHints::kPushPop,
InstHints::kNoHints,
InstHints::kNoHints,
InstHints::kNoHints
}},
// RegInfo.
#define V(index) { x86::RegTraits<index>::kSignature }
{ ASMJIT_LOOKUP_TABLE_32(V, 0) },
// Register signatures.
#define V(index) OperandSignature(x86::RegTraits<RegType(index)>::kSignature)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// RegTypeToTypeId.
#define V(index) x86::RegTraits<index>::kTypeId
{ ASMJIT_LOOKUP_TABLE_32(V, 0) },
#define V(index) TypeId(x86::RegTraits<RegType(index)>::kTypeId)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// TypeIdToRegType.
#define V(index) (index + Type::_kIdBaseStart == Type::kIdI8 ? Reg::kTypeGpbLo : \
index + Type::_kIdBaseStart == Type::kIdU8 ? Reg::kTypeGpbLo : \
index + Type::_kIdBaseStart == Type::kIdI16 ? Reg::kTypeGpw : \
index + Type::_kIdBaseStart == Type::kIdU16 ? Reg::kTypeGpw : \
index + Type::_kIdBaseStart == Type::kIdI32 ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdU32 ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdI64 ? Reg::kTypeGpq : \
index + Type::_kIdBaseStart == Type::kIdU64 ? Reg::kTypeGpq : \
index + Type::_kIdBaseStart == Type::kIdIntPtr ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdUIntPtr ? Reg::kTypeGpd : \
index + Type::_kIdBaseStart == Type::kIdF32 ? Reg::kTypeXmm : \
index + Type::_kIdBaseStart == Type::kIdF64 ? Reg::kTypeXmm : \
index + Type::_kIdBaseStart == Type::kIdMask8 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMask16 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMask32 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMask64 ? Reg::kTypeKReg : \
index + Type::_kIdBaseStart == Type::kIdMmx32 ? Reg::kTypeMm : \
index + Type::_kIdBaseStart == Type::kIdMmx64 ? Reg::kTypeMm : Reg::kTypeNone)
{ ASMJIT_LOOKUP_TABLE_32(V, 0) },
#define V(index) (index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt8) ? RegType::kX86_GpbLo : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt8) ? RegType::kX86_GpbLo : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt16) ? RegType::kX86_Gpw : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt16) ? RegType::kX86_Gpw : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt32) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt32) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kInt64) ? RegType::kX86_Gpq : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUInt64) ? RegType::kX86_Gpq : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kIntPtr) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kUIntPtr) ? RegType::kX86_Gpd : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat32) ? RegType::kX86_Xmm : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kFloat64) ? RegType::kX86_Xmm : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask8) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask16) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask32) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMask64) ? RegType::kX86_KReg : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMmx32) ? RegType::kX86_Mm : \
index + uint32_t(TypeId::_kBaseStart) == uint32_t(TypeId::kMmx64) ? RegType::kX86_Mm : RegType::kNone)
{{ ASMJIT_LOOKUP_TABLE_32(V, 0) }},
#undef V
// Word names of 8-bit, 16-bit, 32-bit, and 64-bit quantities.
{
ISAWordNameId::kDB,
ISAWordNameId::kDW,
ISAWordNameId::kDD,
ISAWordNameId::kDQ
ArchTypeNameId::kDB,
ArchTypeNameId::kDW,
ArchTypeNameId::kDD,
ArchTypeNameId::kDQ
}
};
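For readability, the `TypeIdToRegType` macro chain in the X64 traits above encodes a straightforward mapping. Written out as a plain function (a hypothetical helper for illustration only; AsmJit itself builds this as a constexpr lookup table indexed from `TypeId::_kBaseStart`):

#include <asmjit/core.h>
using namespace asmjit;

// Mirrors the X64 table above, including IntPtr/UIntPtr being listed as Gpd there.
static RegType typeIdToRegTypeX64(TypeId typeId) noexcept {
  switch (typeId) {
    case TypeId::kInt8:
    case TypeId::kUInt8:   return RegType::kX86_GpbLo;
    case TypeId::kInt16:
    case TypeId::kUInt16:  return RegType::kX86_Gpw;
    case TypeId::kInt32:
    case TypeId::kUInt32:
    case TypeId::kIntPtr:
    case TypeId::kUIntPtr: return RegType::kX86_Gpd;
    case TypeId::kInt64:
    case TypeId::kUInt64:  return RegType::kX86_Gpq;
    case TypeId::kFloat32:
    case TypeId::kFloat64: return RegType::kX86_Xmm;
    case TypeId::kMask8:
    case TypeId::kMask16:
    case TypeId::kMask32:
    case TypeId::kMask64:  return RegType::kX86_KReg;
    case TypeId::kMmx32:
    case TypeId::kMmx64:   return RegType::kX86_Mm;
    default:               return RegType::kNone;
  }
}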

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff