[abi] AsmJit v1.18 - performance and memory footprint improvements

  * Refactored the whole codebase to use snake_case convention to
    name functions and variables, including member variables. Class
    naming is unchanged and each class name starts with an upper-case
    character. The intention of this change is to make the source
    code more readable and consistent across multiple projects where
    AsmJit is currently used.

  * Refactored support.h to make it more shareable across projects.

  * x86::Vec now inherits from UniVec.

  * Minor changes in JitAllocator and WriteScope in order to reduce
    the size of WriteScope.

  * Added ZoneStatistics and a Zone::statistics() getter.

  * Improved x86::EmitHelper to use tables instead of choose() and
    other mechanisms to pick between SSE and AVX instructions (see
    the table sketch after this list).

  * Added a non-owning asmjit::Span<T> type and used it in the public
    API to hide the usage of ZoneVector in CodeHolder, Builder, and
    Compiler. Users now only get a Span (with data and size), which
    doesn't require them to know about ZoneVector (a minimal sketch
    of such a span type is shown after this list).

  * Removed RAWorkId from RATiedReg in favor of RAWorkReg*.

  * Removed GEN from LiveInfo to save memory, as it's not needed by
    CFG construction (GEN was merged with the LIVE-IN bits). The
    remaining LIVE-IN, LIVE-OUT, and KILL bits are enough; however,
    KILL bits may be removed in the future as they are not needed
    after LIVE-IN and LIVE-OUT have converged.

  * Optimized the representation of LIVE-IN, LIVE-OUT, and KILL bits
    per block. Now only registers that live across multiple basic
    blocks are included, which means that virtual registers that only
    live in a single block are not included and add no overhead
    during liveness analysis. This optimization alone can make
    liveness analysis 90% faster depending on the generated code (the
    more virtual registers that only live in a single basic block,
    the greater the gains).

  * Optimized building liveness information bits per block. The new
    code uses an algorithm that prevents too many traversals and adds
    a fast path for the case in which only a few registers are used
    (it avoids array operations if the number of all virtual
    registers within the function fits into a single BitWord); a
    sketch of this fast path is shown after this list.

  * Optimized the code that computes which virtual registers are only
    used in a single basic block - this aims to speed up the register
    allocator in the future by using a dedicated code path for
    allocating registers only used in a single basic block.

  * Reduced the information required for each live-span, which is
    used by bin-packing. The struct is now 8 bytes, which enables
    many optimizations the C++ compiler can do (a sketch of such a
    layout is shown after this list).

  * Added UniCompiler (ujit), which can be used to share code paths
    between X86, X86_64, and AArch64 code generation (experimental).
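
A table-based SSE/AVX selection in the spirit of the x86::EmitHelper
change could look like this (the instruction IDs and the select()
helper are hypothetical names for illustration; AsmJit's real tables
are generated and far richer):

    #include <cstddef>
    #include <cstdint>

    // Sketch: table-based selection between the SSE and AVX forms of
    // an instruction - indexing by a flag replaces scattered
    // choose()-style branching in the emit helper.
    enum class InstId : uint32_t { kMovaps, kVmovaps, kAddps, kVaddps };

    // Each row holds { SSE form, AVX form }.
    static constexpr InstId add_ps_table[2] = { InstId::kAddps, InstId::kVaddps };
    static constexpr InstId mov_ps_table[2] = { InstId::kMovaps, InstId::kVmovaps };

    inline InstId select(const InstId (&table)[2], bool use_avx) noexcept {
      return table[size_t(use_avx)];
    }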
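
A minimal sketch of a non-owning span type in the spirit of
asmjit::Span<T>, mentioned above (member names and the exact API are
assumptions for illustration; the real type is defined in the AsmJit
sources):

    #include <cstddef>

    // Sketch: a non-owning view over contiguous items - it stores only
    // a pointer and a size, so the backing container (e.g. an
    // arena-allocated vector) stays hidden from API users.
    template<typename T>
    class Span {
    public:
      Span() noexcept = default;
      Span(T* data, size_t size) noexcept : _data(data), _size(size) {}

      T* data() const noexcept { return _data; }
      size_t size() const noexcept { return _size; }
      bool is_empty() const noexcept { return _size == 0; }

      T& operator[](size_t i) const noexcept { return _data[i]; }

      // Iteration support for range-based for loops.
      T* begin() const noexcept { return _data; }
      T* end() const noexcept { return _data + _size; }

    private:
      T* _data = nullptr;
      size_t _size = 0;
    };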
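
The liveness fast path can be sketched as follows (an illustration
only: it uses the textbook GEN/KILL formulation, whereas the changelog
above notes that GEN is merged into the LIVE-IN bits, and all names
are assumptions rather than AsmJit's internals). When every
cross-block virtual register fits into a single BitWord, set
operations degenerate to plain integer bitwise ops:

    #include <cstdint>
    #include <vector>

    // Sketch: iterative liveness when all cross-block virtual
    // registers fit into one 64-bit BitWord, so no bit-array
    // traversals are needed.
    struct Block {
      std::vector<uint32_t> successors;
      uint64_t gen = 0;   // Registers read before being written here.
      uint64_t kill = 0;  // Registers written in this block.
      uint64_t live_in = 0;
      uint64_t live_out = 0;
    };

    inline void compute_liveness(std::vector<Block>& blocks) {
      bool changed = true;
      while (changed) {
        changed = false;
        // Iterating in reverse converges faster for typical CFGs.
        for (size_t i = blocks.size(); i-- > 0;) {
          Block& b = blocks[i];
          uint64_t out = 0;
          for (uint32_t succ : b.successors)
            out |= blocks[succ].live_in;
          uint64_t in = b.gen | (out & ~b.kill);
          if (in != b.live_in || out != b.live_out) {
            b.live_in = in;
            b.live_out = out;
            changed = true;
          }
        }
      }
    }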
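
And a plausible 8-byte live-span layout (purely illustrative; the
real struct lives in AsmJit's register-allocator code):

    #include <cstdint>

    // Sketch: an 8-byte live-span as consumed by bin-packing - small
    // enough to pass by value and to pack densely into arrays, which
    // C++ compilers optimize well.
    struct LiveSpan {
      uint32_t start; // First position where the span is live.
      uint32_t end;   // One past the last live position.

      bool intersects(const LiveSpan& other) const noexcept {
        return start < other.end && other.start < end;
      }
    };

    static_assert(sizeof(LiveSpan) == 8, "LiveSpan must stay 8 bytes");
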
This commit is contained in:
kobalicek
2025-09-06 13:43:15 +02:00
parent a3199e8857
commit 7596c6d035
211 changed files with 56385 additions and 33857 deletions

View File

@@ -18,6 +18,7 @@
{ "optional": true, "cmd": ["asmjit_test_assembler", "--validate"] },
{ "optional": true, "cmd": ["asmjit_test_emitters"] },
{ "optional": true, "cmd": ["asmjit_test_compiler"] },
{ "optional": true, "cmd": ["asmjit_test_unicompiler"] },
{ "optional": true, "cmd": ["asmjit_test_instinfo"] },
{ "optional": true, "cmd": ["asmjit_test_x86_sections"] },
{ "optional": true, "cmd": ["asmjit_bench_codegen", "--quick"] }

View File

@@ -42,7 +42,6 @@ jobs:
- { title: "linux/hardened" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1", diagnostics: "hardened", }
- { title: "linux/valgrind" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1", diagnostics: "valgrind", }
- { title: "linux/no-deprecated" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_DEPRECATED=1" }
- { title: "linux/no-intrinsics" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_INTRINSICS=1" }
- { title: "linux/no-logging" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_LOGGING=1" }
- { title: "linux/no-logging-text" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_LOGGING=1,ASMJIT_NO_TEXT=1" }
- { title: "linux/no-builder" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_BUILDER=1" }
@@ -54,6 +53,7 @@ jobs:
- { title: "linux/no-aarch64" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Release", defs: "ASMJIT_TEST=1,ASMJIT_NO_AARCH64=1" }
- { title: "linux/use-c++20" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++20" }
- { title: "linux/use-c++23" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-std=c++23" }
- { title: "linux/use-avx2+bmi2" , host: "ubuntu-24.04" , arch: "x64" , cc: "clang-19", conf: "Debug" , defs: "ASMJIT_TEST=1,CMAKE_CXX_FLAGS=-mavx2 -mbmi2" }
- { title: "linux" , host: "ubuntu-24.04" , arch: "x86" , cc: "gcc-9" , conf: "Debug" , defs: "ASMJIT_TEST=1" }
- { title: "linux" , host: "ubuntu-24.04" , arch: "x86" , cc: "gcc-9" , conf: "Release", defs: "ASMJIT_TEST=1" }
- { title: "linux" , host: "ubuntu-24.04" , arch: "x64" , cc: "gcc-9" , conf: "Debug" , defs: "ASMJIT_TEST=1" }
@@ -166,7 +166,7 @@ jobs:
--architecture=${{matrix.arch}}
--problem-matcher=auto
--build-type=${{matrix.conf}}
--build-defs=${{matrix.defs}}
--build-defs="${{matrix.defs}}"
- name: "Build & Test - Cross Platform Actions"
if: ${{matrix.vm && matrix.vm_ver}}
@@ -196,7 +196,7 @@ jobs:
--architecture=${{matrix.arch}} \
--problem-matcher=auto \
--build-type=${{matrix.conf}} \
--build-defs=${{matrix.defs}}
--build-defs="${{matrix.defs}}"
- name: "Build & Test - Docker + QEMU"
if: ${{matrix.vm && !matrix.vm_ver}}
@@ -215,4 +215,4 @@ jobs:
--architecture=${{matrix.arch}} \
--problem-matcher=auto \
--build-type=${{matrix.conf}} \
--build-defs=${{matrix.defs}}
--build-defs="${{matrix.defs}}"

View File

@@ -105,6 +105,14 @@ if (NOT DEFINED ASMJIT_NO_COMPILER)
endif()
endif()
if (NOT DEFINED ASMJIT_NO_UJIT)
if (ASMJIT_NO_COMPILER)
set(ASMJIT_NO_UJIT TRUE)
else()
set(ASMJIT_NO_UJIT FALSE)
endif()
endif()
# AsmJit - Configuration - CMake Introspection
# ============================================
@@ -131,6 +139,7 @@ set(ASMJIT_NO_VALIDATION "${ASMJIT_NO_VALIDATION}" CACHE BOOL "Disable ins
set(ASMJIT_NO_INTROSPECTION "${ASMJIT_NO_INTROSPECTION}" CACHE BOOL "Disable instruction introspection API at build time")
set(ASMJIT_NO_BUILDER "${ASMJIT_NO_BUILDER}" CACHE BOOL "Disable Builder at build time")
set(ASMJIT_NO_COMPILER "${ASMJIT_NO_COMPILER}" CACHE BOOL "Disable Compiler at build time")
set(ASMJIT_NO_UJIT "${ASMJIT_NO_UJIT}" CACHE BOOL "Disable UniCompiler at build time")
# AsmJit - Project
# ================
@@ -239,8 +248,17 @@ if (NOT ASMJIT_NO_CUSTOM_FLAGS)
list(APPEND ASMJIT_PRIVATE_CFLAGS -fno-math-errno)
list(APPEND ASMJIT_PRIVATE_CFLAGS_REL -O2)
# We would like also '-Wzero-as-null-pointer-constant' but it would warn when it comes to system headers.
asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS
-Wdouble-promotion
-Wduplicated-cond
-Wduplicated-branches
-Wlogical-op
-Wrestrict
)
# -fno-semantic-interposition is not available on apple - the compiler issues a warning, which is not detected.
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")
if (APPLE)
asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -fno-threadsafe-statics)
else()
asmjit_detect_cflags(ASMJIT_PRIVATE_CFLAGS -fno-threadsafe-statics -fno-semantic-interposition)
@@ -352,7 +370,8 @@ foreach(build_option # AsmJit build options.
ASMJIT_NO_INTROSPECTION
ASMJIT_NO_VALIDATION
ASMJIT_NO_BUILDER
ASMJIT_NO_COMPILER)
ASMJIT_NO_COMPILER
ASMJIT_NO_UJIT)
if (${build_option})
List(APPEND ASMJIT_CFLAGS "-D${build_option}")
List(APPEND ASMJIT_PRIVATE_CFLAGS "-D${build_option}")
@@ -379,6 +398,20 @@ set(ASMJIT_SRC_LIST
asmjit/core.h
asmjit/core/api-build_p.h
asmjit/core/api-config.h
asmjit/core/arena.cpp
asmjit/core/arena.h
asmjit/core/arenabitset.cpp
asmjit/core/arenabitset_p.h
asmjit/core/arenahash.cpp
asmjit/core/arenahash.h
asmjit/core/arenalist.cpp
asmjit/core/arenalist.h
asmjit/core/arenapool.h
asmjit/core/arenastring.h
asmjit/core/arenatree.cpp
asmjit/core/arenatree.h
asmjit/core/arenavector.cpp
asmjit/core/arenavector.h
asmjit/core/archtraits.cpp
asmjit/core/archtraits.h
asmjit/core/archcommons.h
@@ -434,8 +467,11 @@ set(ASMJIT_SRC_LIST
asmjit/core/osutils.h
asmjit/core/osutils_p.h
asmjit/core/raassignment_p.h
asmjit/core/rabuilders_p.h
asmjit/core/racfgblock_p.h
asmjit/core/racfgbuilder_p.h
asmjit/core/raconstraints_p.h
asmjit/core/radefs_p.h
asmjit/core/rainst_p.h
asmjit/core/ralocal.cpp
asmjit/core/ralocal_p.h
asmjit/core/rapass.cpp
@@ -452,19 +488,6 @@ set(ASMJIT_SRC_LIST
asmjit/core/type.h
asmjit/core/virtmem.cpp
asmjit/core/virtmem.h
asmjit/core/zone.cpp
asmjit/core/zone.h
asmjit/core/zonehash.cpp
asmjit/core/zonehash.h
asmjit/core/zonelist.cpp
asmjit/core/zonelist.h
asmjit/core/zonestack.cpp
asmjit/core/zonestack.h
asmjit/core/zonestring.h
asmjit/core/zonetree.cpp
asmjit/core/zonetree.h
asmjit/core/zonevector.cpp
asmjit/core/zonevector.h
asmjit/a64.h
asmjit/arm.h
@@ -521,6 +544,14 @@ set(ASMJIT_SRC_LIST
asmjit/x86/x86operand.h
asmjit/x86/x86rapass.cpp
asmjit/x86/x86rapass_p.h
asmjit/ujit/ujitbase.h
asmjit/ujit/unicompiler_a64.cpp
asmjit/ujit/unicompiler_x86.cpp
asmjit/ujit/unicompiler.h
asmjit/ujit/uniop.h
asmjit/ujit/vecconsttable.cpp
asmjit/ujit/vecconsttable.h
)
if (MSVC AND NOT ASMJIT_NO_NATVIS)
@@ -695,6 +726,8 @@ if (NOT ASMJIT_EMBED)
endif()
endif()
set_property(SOURCE test/asmjit_test_unicompiler_avx2fma.cpp APPEND PROPERTY COMPILE_OPTIONS ${ASMJIT_AVX2FMA_CFLAGS})
asmjit_add_target(asmjit_test_compiler TEST
SOURCES test/asmjit_test_compiler.cpp
test/asmjit_test_compiler.h
@@ -704,6 +737,16 @@ if (NOT ASMJIT_EMBED)
CFLAGS ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_SSE2_CFLAGS}
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
asmjit_add_target(asmjit_test_unicompiler TEST
SOURCES test/asmjit_test_unicompiler.cpp
test/asmjit_test_unicompiler_sse2.cpp
test/asmjit_test_unicompiler_avx2fma.cpp
test/broken.cpp
LIBRARIES asmjit::asmjit
CFLAGS ${ASMJIT_PRIVATE_CFLAGS} ${ASMJIT_SSE2_CFLAGS}
CFLAGS_DBG ${ASMJIT_PRIVATE_CFLAGS_DBG}
CFLAGS_REL ${ASMJIT_PRIVATE_CFLAGS_REL})
endif()
endif()

View File

@@ -10,31 +10,6 @@ AsmJit is a lightweight library for machine code generation written in C++ langu
See [asmjit.com](https://asmjit.com) page for more details, examples, and documentation.
Documentation
-------------
* [Documentation Index](https://asmjit.com/doc/index.html)
* [Build Instructions](https://asmjit.com/doc/group__asmjit__build.html)
Contributing
------------
* See [CONTRIBUTING](./CONTRIBUTING.md) page for more details
Breaking Changes
----------------
Breaking the API is sometimes inevitable, what to do?
* See [Breaking Changes Guide](https://asmjit.com/doc/group__asmjit__breaking__changes.html), which is now part of AsmJit documentation
* See asmjit tests, they always compile and provide implementation of many use-cases:
* [asmjit_test_emitters.cpp](./test/asmjit_test_emitters.cpp) - Tests that demonstrate the purpose of emitters
* [asmjit_test_assembler_x86.cpp](./test/asmjit_test_assembler_x86.cpp) - Tests targeting AsmJit's Assembler (x86/x64)
* [asmjit_test_compiler_x86.cpp](./test/asmjit_test_compiler_x86.cpp) - Tests targeting AsmJit's Compiler (x86/x64)
* [asmjit_test_instinfo.cpp](./test/asmjit_test_instinfo.cpp) - Tests that query instruction information
* [asmjit_test_x86_sections.cpp](./test/asmjit_test_x86_sections.cpp) - Multiple sections test.
* Visit our [Gitter Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need a quick help
Project Organization
--------------------
@@ -47,24 +22,47 @@ Project Organization
* **test** - Unit and integration tests (don't embed in your project)
* **tools** - Tools used for configuring, documenting, and generating files
Ports
-----
Roadmap
-------
* [ ] 32-bit ARM/Thumb port (work in progress)
* [ ] RISC-V port (not in progress, help welcome)
* See [Roadmap](https://asmjit.com/roadmap.html) page for more details
Contributing
------------
* See [CONTRIBUTING](./CONTRIBUTING.md) page for more details
Documentation
-------------
* [Documentation Index](https://asmjit.com/doc/index.html)
* [Build Instructions](https://asmjit.com/doc/group__asmjit__build.html) (includes [CMake Integration](https://asmjit.com/doc/group__asmjit__build.html#cmake_integration))
Breaking Changes
----------------
Breaking the API is sometimes inevitable, what to do?
* See [Breaking Changes Guide](https://asmjit.com/doc/group__asmjit__breaking__changes.html), which is now part of AsmJit documentation
* See asmjit tests, they always compile and provide implementation of many use-cases:
* [asmjit_test_emitters.cpp](./test/asmjit_test_emitters.cpp) - Tests that demonstrate the purpose of emitters
* [asmjit_test_assembler_x86.cpp](./test/asmjit_test_assembler_x86.cpp) - Tests targeting AsmJit's Assembler (x86/x64)
* [asmjit_test_compiler_x86.cpp](./test/asmjit_test_compiler_x86.cpp) - Tests targeting AsmJit's Compiler (x86/x64)
* [asmjit_test_instinfo.cpp](./test/asmjit_test_instinfo.cpp) - Tests that query instruction information
* [asmjit_test_x86_sections.cpp](./test/asmjit_test_x86_sections.cpp) - Multiple sections test
* Visit our [Gitter Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need a quick help
Support
-------
* AsmJit project has both community and commercial support, see [AsmJit's Support Page](https://asmjit.com/support.html)
* You can help the development and maintenance through Petr Kobalicek's [GitHub sponsors Profile](https://github.com/sponsors/kobalicek)
* Organizations that rely on AsmJit should support the development!
Notable Donors List:
* [ZehMatt](https://github.com/ZehMatt)
Authors & Maintainers
---------------------
* Petr Kobalicek <kobalicek.petr@gmail.com>
* Petr Kobalicek <kobalicek.petr@gmail.com> ([website](https://kobalicek.com))

View File

@@ -18,7 +18,6 @@ const dict = base.dict;
const NONE = base.NONE;
const Parsing = base.Parsing;
const MapUtils = base.MapUtils;
const hasOwn = base.hasOwn;
// Export
// ======
@@ -324,9 +323,9 @@ class Instruction extends base.Instruction {
super(db, data);
// name, operands, encoding, opcode, metadata
const encoding = hasOwn(data, "a32") ? "a32" :
hasOwn(data, "t32") ? "t32" :
hasOwn(data, "t16") ? "t16" : "";
const encoding = Object.hasOwn(data, "a32") ? "a32" :
Object.hasOwn(data, "t32") ? "t32" :
Object.hasOwn(data, "t16") ? "t16" : "";
this.name = data.name;
this.it = dict(); // THUMB's 'it' flags.
@@ -963,9 +962,9 @@ class ISA extends base.ISA {
const names = (sep !== -1 ? sgn.substring(0, sep) : sgn).trim().split("/");
const operands = sep !== -1 ? sgn.substring(sep + 1) : "";
const encoding = hasOwn(obj, "a32") ? "a32" :
hasOwn(obj, "t32") ? "t32" :
hasOwn(obj, "t16") ? "t16" : "";
const encoding = Object.hasOwn(obj, "a32") ? "a32" :
Object.hasOwn(obj, "t32") ? "t32" :
Object.hasOwn(obj, "t16") ? "t16" : "";
if (!encoding)
FAIL(`Instruction ${names.join("/")} doesn't encoding, it must provide either a32, t32, or t16 field`);

View File

@@ -14,7 +14,6 @@ function FAIL(msg) { throw new Error("[AArch64] " + msg); }
const base = $scope.base ? $scope.base : require("./base.js");
const exp = $scope.exp ? $scope.exp : require("./exp.js")
const hasOwn = Object.prototype.hasOwnProperty;
const dict = base.dict;
const NONE = base.NONE;
const Parsing = base.Parsing;

View File

@@ -15,14 +15,6 @@ const base = $scope[$as] = Object.create(null);
base.exp = exp;
// Import.
const hasOwnProperty = Object.prototype.hasOwnProperty;
function hasOwn(object, key) {
return hasOwnProperty.call(object, key);
}
base.hasOwn = hasOwn;
function dict(src) {
const dst = Object.create(null);
if (src)
@@ -657,7 +649,7 @@ class ISA {
_addInstruction(instruction) {
let group;
if (hasOwn(this._instructionMap, instruction.name)) {
if (Object.hasOwn(this._instructionMap, instruction.name)) {
group = this._instructionMap[instruction.name];
}
else {

View File

@@ -6,8 +6,6 @@
(function($scope, $as) {
"use strict";
const hasOwn = Object.prototype.hasOwnProperty;
// Supported Operators
// -------------------
@@ -145,7 +143,7 @@ class CallNode extends ExpNode {
class UnaryNode extends ExpNode {
constructor(op, child) {
if (!hasOwn.call(kUnaryOperators, op))
if (!Object.hasOwn(kUnaryOperators, op))
throw new Error(`Invalid unary operator '${op}`);
super("unary");
@@ -182,7 +180,7 @@ class UnaryNode extends ExpNode {
class BinaryNode extends ExpNode {
constructor(op, left, right) {
if (!hasOwn.call(kBinaryOperators, op))
if (!Object.hasOwn(kBinaryOperators, op))
throw new Error(`Invalid binary operator '${op}`);
super("binary");
@@ -406,7 +404,7 @@ function tokenize(source) {
do {
for (j = Math.min(i - start, kMaxOperatorLen); j > 0; j--) {
const part = source.substr(start, j);
if (hasOwn.call(kUnaryOperators, part) || hasOwn.call(kBinaryOperators, part) || j === 1) {
if (Object.hasOwn(kUnaryOperators, part) || Object.hasOwn(kBinaryOperators, part) || j === 1) {
tokens.push(newToken(kTokenPunct, start, part, null));
start += j;
break;
@@ -521,7 +519,7 @@ class Parser {
// Parse a possible binary operator - the loop must repeat, if present.
token = this.peek();
if (token.type === kTokenPunct && hasOwn.call(kBinaryOperators, token.data)) {
if (token.type === kTokenPunct && Object.hasOwn(kBinaryOperators, token.data)) {
const opName = token.data;
if (opName === ":")
break;
@@ -696,7 +694,7 @@ class Collector extends Visitor {
visit(node) {
if (node.type === this.nodeType) {
if (hasOwn.call(this.dict, node.name))
if (Object.hasOwn(this.dict, node.name))
this.dict[node.name]++;
else
this.dict[node.name] = 1;

View File

@@ -9,7 +9,6 @@
// Import.
const base = $scope.base ? $scope.base : require("./base.js");
const hasOwn = base.hasOwn;
const dict = base.dict;
const NONE = base.NONE;
const Parsing = base.Parsing;
@@ -175,7 +174,7 @@ const CpuRegisters = buildCpuRegs({
// X86/X64 utilities.
class Utils {
static groupOf(op) {
return hasOwn(OperandGroupInfo, op) ? OperandGroupInfo[op].group : null;
return Object.hasOwn(OperandGroupInfo, op) ? OperandGroupInfo[op].group : null;
}
static splitInstructionSignature(s) {
@@ -216,7 +215,7 @@ class Utils {
}
// Get whether the string `s` describes a register operand.
static isRegOp(s) { return s && hasOwn(CpuRegisters, s); }
static isRegOp(s) { return s && Object.hasOwn(CpuRegisters, s); }
// Get whether the string `s` describes a memory operand.
static isMemOp(s) { return s && /^(?:mem|mib|tmem|moff||(?:m(?:off)?\d+(?:dec|bcd|fp|int)?)|(?:m16_\d+)|(?:vm\d+(?:x|y|z)))$/.test(s); }
// Get whether the string `s` describes an immediate operand.
@@ -225,12 +224,12 @@ class Utils {
static isRelOp(s) { return s && /^rel\d+$/.test(s); }
// Get a register type of a `s`, returns `null` if the register is unknown.
static regTypeOf(s) { return hasOwn(CpuRegisters, s) ? CpuRegisters[s].type : null; }
static regTypeOf(s) { return Object.hasOwn(CpuRegisters, s) ? CpuRegisters[s].type : null; }
// Get a register kind of a `s`, returns `null` if the register is unknown.
static regKindOf(s) { return hasOwn(CpuRegisters, s) ? CpuRegisters[s].kind : null; }
static regKindOf(s) { return Object.hasOwn(CpuRegisters, s) ? CpuRegisters[s].kind : null; }
// Get a register type of a `s`, returns `null` if the register is unknown and `-1`
// if the given string does only represent a register type, but not a specific reg.
static regIndexOf(s) { return hasOwn(CpuRegisters, s) ? CpuRegisters[s].index : null; }
static regIndexOf(s) { return Object.hasOwn(CpuRegisters, s) ? CpuRegisters[s].index : null; }
static regSize(s) {
if (s in RegSize)
@@ -578,7 +577,7 @@ class Instruction extends base.Instruction {
}
_substituteOpcodePart(op, groupIndex) {
if (hasOwn(OpcodeGroupInfo, op)) {
if (Object.hasOwn(OpcodeGroupInfo, op)) {
return OpcodeGroupInfo[op].subst[groupIndex];
}
else {
@@ -744,7 +743,7 @@ class Instruction extends base.Instruction {
}
// Process `L/LL` field.
if (hasOwn(OpcodeLLMapping, comp)) {
if (Object.hasOwn(OpcodeLLMapping, comp)) {
this.opcode.l = OpcodeLLMapping[comp];
continue;
}
@@ -1027,7 +1026,7 @@ class Instruction extends base.Instruction {
}
if (consecutiveLead) {
consecutiveLead.consecutiveLeadCount = consecutiveLastIndex + 1;
consecutiveLead.consecutive_lead_count = consecutiveLastIndex + 1;
}
}

View File

@@ -22,7 +22,7 @@
</Expand>
</Type>
<Type Name="asmjit::ZoneVector&lt;*&gt;">
<Type Name="asmjit::ArenaVector&lt;*&gt;">
<DisplayString>{{ [size={_size, d} capacity={_capacity, d}] }}</DisplayString>
<Expand>
<Item Name="_size" ExcludeView="simple">_size, d</Item>
@@ -35,10 +35,10 @@
</Type>
<Type Name="asmjit::OperandSignature">
<Intrinsic Name="opType" Expression="(asmjit::OperandType)(_bits &amp; 0x7)" />
<Intrinsic Name="op_type" Expression="(asmjit::OperandType)(_bits &amp; 0x7)" />
<Intrinsic Name="opSize" Expression="(_bits &gt;&gt; 24) &amp; 0xFF" />
<Intrinsic Name="regType" Expression="(asmjit::RegType)((_bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="regGroup" Expression="(asmjit::RegGroup)((_bits &gt;&gt; 8) &amp; 0xF)" />
<Intrinsic Name="reg_type" Expression="(asmjit::RegType)((_bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="reg_group" Expression="(asmjit::RegGroup)((_bits &gt;&gt; 8) &amp; 0xF)" />
<Intrinsic Name="memBaseType" Expression="(asmjit::RegType)((_bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="memIndexType" Expression="(asmjit::RegType)((_bits &gt;&gt; 8) &amp; 0x1F)" />
<Intrinsic Name="memRegHome" Expression="(bool)((_bits &gt;&gt; 13) &amp; 0x1)" />
@@ -48,36 +48,36 @@
<Intrinsic Name="memX86Broadcast" Expression="(asmjit::x86::Mem::Broadcast)((_bits &gt;&gt; 21) &amp; 0x7)" />
<Intrinsic Name="immType" Expression="(asmjit::ImmType)((_bits &gt;&gt; 3) &amp; 0x1)" />
<DisplayString Condition="opType() == asmjit::OperandType::kNone">[None]</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kReg">[Reg] {{ type={regType()} group={regGroup()} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kMem">[Mem] {{ base={memBaseType()} index={memIndexType()} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kImm">[Imm] {{ type={immType()} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kLabel">[Label]</DisplayString>
<DisplayString Condition="opType() &gt; asmjit::OperandType::kMaxValue">[Unknown]</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kNone">[None]</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kReg">[Reg] {{ type={reg_type()} group={reg_group()} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kMem">[Mem] {{ base={memBaseType()} index={memIndexType()} }}</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kImm">[Imm] {{ type={immType()} }}</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kLabel">[Label]</DisplayString>
<DisplayString Condition="op_type() &gt; asmjit::OperandType::kMaxValue">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="bits">_bits, X</Item>
<Item Name="op.type">opType()</Item>
<Item Name="reg.type" Condition="opType() == asmjit::OperandType::kReg">regType()</Item>
<Item Name="reg.group" Condition="opType() == asmjit::OperandType::kReg">regGroup()</Item>
<Item Name="reg.size" Condition="opType() == asmjit::OperandType::kReg">opSize(), d</Item>
<Item Name="mem.baseType" Condition="opType() == asmjit::OperandType::kMem">memBaseType()</Item>
<Item Name="mem.indexType" Condition="opType() == asmjit::OperandType::kMem">memIndexType()</Item>
<Item Name="mem.regHome" Condition="opType() == asmjit::OperandType::kMem">memRegHome()</Item>
<Item Name="mem.size" Condition="opType() == asmjit::OperandType::kMem">opSize(), d</Item>
<Item Name="mem.x86.segment" Condition="opType() == asmjit::OperandType::kMem">memX86Segment()</Item>
<Item Name="mem.x86.addrType" Condition="opType() == asmjit::OperandType::kMem">memX86AddrType()</Item>
<Item Name="mem.x86.shift" Condition="opType() == asmjit::OperandType::kMem">memX86ShiftValue()</Item>
<Item Name="mem.x86.broadcast" Condition="opType() == asmjit::OperandType::kMem">memX86Broadcast()</Item>
<Item Name="imm.type" Condition="opType() == asmjit::OperandType::kImm">immType()</Item>
<Item Name="op.type">op_type()</Item>
<Item Name="reg.type" Condition="op_type() == asmjit::OperandType::kReg">reg_type()</Item>
<Item Name="reg.group" Condition="op_type() == asmjit::OperandType::kReg">reg_group()</Item>
<Item Name="reg.size" Condition="op_type() == asmjit::OperandType::kReg">opSize(), d</Item>
<Item Name="mem.base_type" Condition="op_type() == asmjit::OperandType::kMem">memBaseType()</Item>
<Item Name="mem.index_type" Condition="op_type() == asmjit::OperandType::kMem">memIndexType()</Item>
<Item Name="mem.regHome" Condition="op_type() == asmjit::OperandType::kMem">memRegHome()</Item>
<Item Name="mem.size" Condition="op_type() == asmjit::OperandType::kMem">opSize(), d</Item>
<Item Name="mem.x86.segment" Condition="op_type() == asmjit::OperandType::kMem">memX86Segment()</Item>
<Item Name="mem.x86.addr_type" Condition="op_type() == asmjit::OperandType::kMem">memX86AddrType()</Item>
<Item Name="mem.x86.shift" Condition="op_type() == asmjit::OperandType::kMem">memX86ShiftValue()</Item>
<Item Name="mem.x86.broadcast" Condition="op_type() == asmjit::OperandType::kMem">memX86Broadcast()</Item>
<Item Name="imm.type" Condition="op_type() == asmjit::OperandType::kImm">immType()</Item>
</Expand>
</Type>
<Type Name="asmjit::Operand_">
<Intrinsic Name="opType" Expression="(asmjit::OperandType)(_signature._bits &amp; 0x7)" />
<Intrinsic Name="op_type" Expression="(asmjit::OperandType)(_signature._bits &amp; 0x7)" />
<Intrinsic Name="opSize" Expression="(_signature._bits &gt;&gt; 24) &amp; 0xFF" />
<Intrinsic Name="regType" Expression="(asmjit::RegType)((_signature._bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="regGroup" Expression="(asmjit::RegGroup)((_signature._bits &gt;&gt; 8) &amp; 0xF)" />
<Intrinsic Name="reg_type" Expression="(asmjit::RegType)((_signature._bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="reg_group" Expression="(asmjit::RegGroup)((_signature._bits &gt;&gt; 8) &amp; 0xF)" />
<Intrinsic Name="memBaseType" Expression="(asmjit::RegType)((_signature._bits &gt;&gt; 3) &amp; 0x1F)" />
<Intrinsic Name="memIndexType" Expression="(asmjit::RegType)((_signature._bits &gt;&gt; 8) &amp; 0x1F)" />
<Intrinsic Name="memRegHome" Expression="(bool)((_signature._bits &gt;&gt; 13) &amp; 0x1)" />
@@ -85,161 +85,161 @@
<Intrinsic Name="memX86AddrType" Expression="(asmjit::x86::Mem::AddrType)((_signature._bits &gt;&gt; 14) &amp; 0x3)" />
<Intrinsic Name="memX86ShiftValue" Expression="((_signature._bits &gt;&gt; 16) &amp; 0x3)" />
<Intrinsic Name="memX86Broadcast" Expression="(asmjit::x86::Mem::Broadcast)((_signature._bits &gt;&gt; 21) &amp; 0x7)" />
<Intrinsic Name="memBaseId" Expression="_baseId" />
<Intrinsic Name="memBaseId" Expression="_base_id" />
<Intrinsic Name="memIndexId" Expression="_data[0]" />
<Intrinsic Name="memOffset32b" Expression="(__int64)int(_data[1])" />
<Intrinsic Name="memOffset64b" Expression="(__int64) ((unsigned __int64)_baseId &lt;&lt; 32) | ((unsigned __int64)_data[1])" />
<Intrinsic Name="memOffset64b" Expression="(__int64) ((unsigned __int64)_base_id &lt;&lt; 32) | ((unsigned __int64)_data[1])" />
<Intrinsic Name="memOffset" Expression="memBaseType() != asmjit::RegType::kNone ? memOffset32b() : memOffset64b()" />
<Intrinsic Name="immType" Expression="(asmjit::ImmType)((_signature._bits &gt;&gt; 3) &amp; 0x1)" />
<Intrinsic Name="immValue" Expression="((__int64)_data[1] &lt;&lt; 32) | (__int64)_data[0]" />
<DisplayString Condition="opType() == asmjit::OperandType::kNone">[None]</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kReg">[Reg] {{ id={_baseId, d} group={regGroup(), d} type={regType(), d} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kMem">[Mem] {{ baseId={memBaseId(), d} indexId={memIndexId(), d} offset={(__int64)memOffset(), d} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kImm">[Imm] {{ val={immValue(), d} hex={immValue(), X} }}</DisplayString>
<DisplayString Condition="opType() == asmjit::OperandType::kLabel">[Label] {{ id={_baseId} }}</DisplayString>
<DisplayString Condition="opType() &gt; 4">[Unknown]</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kNone">[None]</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kReg">[Reg] {{ id={_base_id, d} group={reg_group(), d} type={reg_type(), d} size={opSize(), d} }}</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kMem">[Mem] {{ base_id={memBaseId(), d} index_id={memIndexId(), d} offset={(__int64)memOffset(), d} }}</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kImm">[Imm] {{ val={immValue(), d} hex={immValue(), X} }}</DisplayString>
<DisplayString Condition="op_type() == asmjit::OperandType::kLabel">[Label] {{ id={_base_id} }}</DisplayString>
<DisplayString Condition="op_type() &gt; 4">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="_signature">_signature._bits, X</Item>
<Item Name="op.type">opType()</Item>
<Item Name="op.type">op_type()</Item>
<Item Name="op.size">opSize(), d</Item>
<Item Name="reg.type" Condition="opType() == asmjit::OperandType::kReg">regType()</Item>
<Item Name="reg.group" Condition="opType() == asmjit::OperandType::kReg">regGroup()</Item>
<Item Name="reg.id" Condition="opType() == asmjit::OperandType::kReg">_baseId, d</Item>
<Item Name="mem.baseType" Condition="opType() == asmjit::OperandType::kMem">memBaseType()</Item>
<Item Name="mem.baseId" Condition="opType() == asmjit::OperandType::kMem &amp;&amp; memBaseType() != asmjit::RegType::kNone">memBaseId()</Item>
<Item Name="mem.indexType" Condition="opType() == asmjit::OperandType::kMem">memIndexType()</Item>
<Item Name="mem.indexId" Condition="opType() == asmjit::OperandType::kMem &amp;&amp; memIndexType() != asmjit::RegType::kNone">memIndexId()</Item>
<Item Name="mem.regHome" Condition="opType() == asmjit::OperandType::kMem">memRegHome()</Item>
<Item Name="mem.offset" Condition="opType() == asmjit::OperandType::kMem">memOffset(), d</Item>
<Item Name="mem.x86.segment" Condition="opType() == asmjit::OperandType::kMem">memX86Segment()</Item>
<Item Name="mem.x86.addrType" Condition="opType() == asmjit::OperandType::kMem">memX86AddrType()</Item>
<Item Name="mem.x86.shift" Condition="opType() == asmjit::OperandType::kMem">memX86ShiftValue()</Item>
<Item Name="mem.x86.broadcast" Condition="opType() == asmjit::OperandType::kMem">memX86Broadcast()</Item>
<Item Name="imm.type" Condition="opType() == asmjit::OperandType::kImm">immType()</Item>
<Item Name="imm.value" Condition="opType() == asmjit::OperandType::kImm">immValue(), X</Item>
<Item Name="label.id" Condition="opType() == asmjit::OperandType::kLabel">_baseId, d</Item>
<Item Name="raw.baseId">_baseId</Item>
<Item Name="reg.type" Condition="op_type() == asmjit::OperandType::kReg">reg_type()</Item>
<Item Name="reg.group" Condition="op_type() == asmjit::OperandType::kReg">reg_group()</Item>
<Item Name="reg.id" Condition="op_type() == asmjit::OperandType::kReg">_base_id, d</Item>
<Item Name="mem.base_type" Condition="op_type() == asmjit::OperandType::kMem">memBaseType()</Item>
<Item Name="mem.base_id" Condition="op_type() == asmjit::OperandType::kMem &amp;&amp; memBaseType() != asmjit::RegType::kNone">memBaseId()</Item>
<Item Name="mem.index_type" Condition="op_type() == asmjit::OperandType::kMem">memIndexType()</Item>
<Item Name="mem.index_id" Condition="op_type() == asmjit::OperandType::kMem &amp;&amp; memIndexType() != asmjit::RegType::kNone">memIndexId()</Item>
<Item Name="mem.regHome" Condition="op_type() == asmjit::OperandType::kMem">memRegHome()</Item>
<Item Name="mem.offset" Condition="op_type() == asmjit::OperandType::kMem">memOffset(), d</Item>
<Item Name="mem.x86.segment" Condition="op_type() == asmjit::OperandType::kMem">memX86Segment()</Item>
<Item Name="mem.x86.addr_type" Condition="op_type() == asmjit::OperandType::kMem">memX86AddrType()</Item>
<Item Name="mem.x86.shift" Condition="op_type() == asmjit::OperandType::kMem">memX86ShiftValue()</Item>
<Item Name="mem.x86.broadcast" Condition="op_type() == asmjit::OperandType::kMem">memX86Broadcast()</Item>
<Item Name="imm.type" Condition="op_type() == asmjit::OperandType::kImm">immType()</Item>
<Item Name="imm.value" Condition="op_type() == asmjit::OperandType::kImm">immValue(), X</Item>
<Item Name="label.id" Condition="op_type() == asmjit::OperandType::kLabel">_base_id, d</Item>
<Item Name="raw.base_id">_base_id</Item>
<Item Name="raw.data[0]">_data[0]</Item>
<Item Name="raw.data[1]">_data[1]</Item>
</Expand>
</Type>
<Type Name="asmjit::FuncValue">
<Intrinsic Name="isReg" Expression="(_data &amp; asmjit::FuncValue::kFlagIsReg) != 0" />
<Intrinsic Name="is_reg" Expression="(_data &amp; asmjit::FuncValue::kFlagIsReg) != 0" />
<Intrinsic Name="isStack" Expression="(_data &amp; asmjit::FuncValue::kFlagIsStack) != 0" />
<Intrinsic Name="isIndirect" Expression="(_data &amp; asmjit::FuncValue::kFlagIsIndirect) != 0" />
<Intrinsic Name="isDone" Expression="(_data &amp; asmjit::FuncValue::kFlagIsDone) != 0" />
<Intrinsic Name="is_done" Expression="(_data &amp; asmjit::FuncValue::kFlagIsDone) != 0" />
<Intrinsic Name="typeId" Expression="((_data &amp; asmjit::FuncValue::kTypeIdMask) &gt;&gt; asmjit::FuncValue::kTypeIdShift)" />
<Intrinsic Name="regId" Expression="((_data &amp; asmjit::FuncValue::kRegIdMask) &gt;&gt; asmjit::FuncValue::kRegIdShift)" />
<Intrinsic Name="regType" Expression="((_data &amp; asmjit::FuncValue::kRegTypeMask) &gt;&gt; asmjit::FuncValue::kRegTypeShift)" />
<Intrinsic Name="stackOffset" Expression="((_data &amp; asmjit::FuncValue::kStackOffsetMask) &gt;&gt; asmjit::FuncValue::kStackOffsetShift)" />
<Intrinsic Name="type_id" Expression="((_data &amp; asmjit::FuncValue::kTypeIdMask) &gt;&gt; asmjit::FuncValue::kTypeIdShift)" />
<Intrinsic Name="reg_id" Expression="((_data &amp; asmjit::FuncValue::kRegIdMask) &gt;&gt; asmjit::FuncValue::kRegIdShift)" />
<Intrinsic Name="reg_type" Expression="((_data &amp; asmjit::FuncValue::kRegTypeMask) &gt;&gt; asmjit::FuncValue::kRegTypeShift)" />
<Intrinsic Name="stack_offset" Expression="((_data &amp; asmjit::FuncValue::kStackOffsetMask) &gt;&gt; asmjit::FuncValue::kStackOffsetShift)" />
<DisplayString Condition="isReg()">[RegValue {{ regType={regType()} indirect={isIndirect()} done={isDone()} }}]</DisplayString>
<DisplayString Condition="isStack()">[StackValue {{ indirect={isIndirect()} done={isDone()} }}]</DisplayString>
<DisplayString Condition="!isReg() &amp;&amp; !isStack()">[Unknown]</DisplayString>
<DisplayString Condition="is_reg()">[RegValue {{ reg_type={reg_type()} indirect={isIndirect()} done={is_done()} }}]</DisplayString>
<DisplayString Condition="isStack()">[StackValue {{ indirect={isIndirect()} done={is_done()} }}]</DisplayString>
<DisplayString Condition="!is_reg() &amp;&amp; !isStack()">[Unknown]</DisplayString>
<Expand HideRawView="true">
<Item Name="data">_data</Item>
<Item Name="typeId">(asmjit::TypeId)(typeId())</Item>
<Item Name="regType" Condition="isReg()">(asmjit::RegType)regType()</Item>
<Item Name="regId" Condition="isReg()">regId()</Item>
<Item Name="stackOffset" Condition="isStack()">stackOffset()</Item>
<Item Name="type_id">(asmjit::TypeId)(type_id())</Item>
<Item Name="reg_type" Condition="is_reg()">(asmjit::RegType)reg_type()</Item>
<Item Name="reg_id" Condition="is_reg()">reg_id()</Item>
<Item Name="stack_offset" Condition="isStack()">stack_offset()</Item>
</Expand>
</Type>
<Type Name="asmjit::BaseNode">
<Intrinsic Name="nodeType" Expression="_nodeType" />
<Intrinsic Name="nodeType" Expression="_node_type" />
<Intrinsic Name="isInst" Expression="nodeType() == asmjit::NodeType::kInst"></Intrinsic>
<Intrinsic Name="isSection" Expression="nodeType() == asmjit::NodeType::kSection"></Intrinsic>
<Intrinsic Name="isLabel" Expression="nodeType() == asmjit::NodeType::kLabel"></Intrinsic>
<Intrinsic Name="isAlign" Expression="nodeType() == asmjit::NodeType::kAlign"></Intrinsic>
<Intrinsic Name="isEmbedData" Expression="nodeType() == asmjit::NodeType::kEmbedData"></Intrinsic>
<Intrinsic Name="isEmbedLabel" Expression="nodeType() == asmjit::NodeType::kEmbedLabel"></Intrinsic>
<Intrinsic Name="isEmbedLabelDelta" Expression="nodeType() == asmjit::NodeType::kEmbedLabelDelta"></Intrinsic>
<Intrinsic Name="isConstPool" Expression="nodeType() == asmjit::NodeType::kConstPool"></Intrinsic>
<Intrinsic Name="isComment" Expression="nodeType() == asmjit::NodeType::kComment"></Intrinsic>
<Intrinsic Name="isSentinel" Expression="nodeType() == asmjit::NodeType::kSentinel"></Intrinsic>
<Intrinsic Name="is_inst" Expression="nodeType() == asmjit::NodeType::kInst"></Intrinsic>
<Intrinsic Name="is_section" Expression="nodeType() == asmjit::NodeType::kSection"></Intrinsic>
<Intrinsic Name="is_label" Expression="nodeType() == asmjit::NodeType::kLabel"></Intrinsic>
<Intrinsic Name="is_align" Expression="nodeType() == asmjit::NodeType::kAlign"></Intrinsic>
<Intrinsic Name="is_embed_data" Expression="nodeType() == asmjit::NodeType::kEmbedData"></Intrinsic>
<Intrinsic Name="is_embed_label" Expression="nodeType() == asmjit::NodeType::kEmbedLabel"></Intrinsic>
<Intrinsic Name="is_embed_label_delta" Expression="nodeType() == asmjit::NodeType::kEmbedLabelDelta"></Intrinsic>
<Intrinsic Name="is_const_pool" Expression="nodeType() == asmjit::NodeType::kConstPool"></Intrinsic>
<Intrinsic Name="is_comment" Expression="nodeType() == asmjit::NodeType::kComment"></Intrinsic>
<Intrinsic Name="is_sentinel" Expression="nodeType() == asmjit::NodeType::kSentinel"></Intrinsic>
<Intrinsic Name="isJump" Expression="nodeType() == asmjit::NodeType::kJump"></Intrinsic>
<Intrinsic Name="isFunc" Expression="nodeType() == asmjit::NodeType::kFunc"></Intrinsic>
<Intrinsic Name="isFuncRet" Expression="nodeType() == asmjit::NodeType::kFuncRet"></Intrinsic>
<Intrinsic Name="isInvoke" Expression="nodeType() == asmjit::NodeType::kInvoke"></Intrinsic>
<Intrinsic Name="is_func" Expression="nodeType() == asmjit::NodeType::kFunc"></Intrinsic>
<Intrinsic Name="is_func_ret" Expression="nodeType() == asmjit::NodeType::kFuncRet"></Intrinsic>
<Intrinsic Name="is_invoke" Expression="nodeType() == asmjit::NodeType::kInvoke"></Intrinsic>
<Intrinsic Name="actsAsInst" Expression="isInst() || isJump() || isFunc() || isFuncRet() || isInvoke()" />
<Intrinsic Name="actsAsLabel" Expression="isLabel() || isFunc()" />
<Intrinsic Name="actsAsInst" Expression="is_inst() || isJump() || is_func() || is_func_ret() || is_invoke()" />
<Intrinsic Name="actsAsLabel" Expression="is_label() || is_func()" />
<DisplayString Condition="isInst()">[InstNode]</DisplayString>
<DisplayString Condition="isSection()">[SectionNode]</DisplayString>
<DisplayString Condition="isLabel()">[LabelNode]</DisplayString>
<DisplayString Condition="isAlign()">[AlignNode]</DisplayString>
<DisplayString Condition="isEmbedData()">[EmbedDataNode]</DisplayString>
<DisplayString Condition="isEmbedLabel()">[EmbedLabelNode]</DisplayString>
<DisplayString Condition="isEmbedLabelDelta()">[EmbedLabelDeltaNode]</DisplayString>
<DisplayString Condition="isConstPool()">[ConstPoolNode]</DisplayString>
<DisplayString Condition="isComment()">[CommentNode]</DisplayString>
<DisplayString Condition="isSentinel()">[SentinelNode]</DisplayString>
<DisplayString Condition="is_inst()">[InstNode]</DisplayString>
<DisplayString Condition="is_section()">[SectionNode]</DisplayString>
<DisplayString Condition="is_label()">[LabelNode]</DisplayString>
<DisplayString Condition="is_align()">[AlignNode]</DisplayString>
<DisplayString Condition="is_embed_data()">[EmbedDataNode]</DisplayString>
<DisplayString Condition="is_embed_label()">[EmbedLabelNode]</DisplayString>
<DisplayString Condition="is_embed_label_delta()">[EmbedLabelDeltaNode]</DisplayString>
<DisplayString Condition="is_const_pool()">[ConstPoolNode]</DisplayString>
<DisplayString Condition="is_comment()">[CommentNode]</DisplayString>
<DisplayString Condition="is_sentinel()">[SentinelNode]</DisplayString>
<DisplayString Condition="isJump()">[JumpNode]</DisplayString>
<DisplayString Condition="isFunc()">[FuncNode]</DisplayString>
<DisplayString Condition="isFuncRet()">[FuncRetNode]</DisplayString>
<DisplayString Condition="isInvoke()">[InvokeNode]</DisplayString>
<DisplayString Condition="is_func()">[FuncNode]</DisplayString>
<DisplayString Condition="is_func_ret()">[FuncRetNode]</DisplayString>
<DisplayString Condition="is_invoke()">[InvokeNode]</DisplayString>
<DisplayString Condition="nodeType() == asmjit::NodeType::kNone || nodeType() &gt; 18">[UnknownNode {nodeType(), d}]</DisplayString>
<Expand HideRawView="true">
<Item Name="prev">_prev</Item>
<Item Name="next">_next</Item>
<Item Name="nodeType">_nodeType</Item>
<Item Name="nodeFlags">_nodeFlags</Item>
<Item Name="nodeType">_node_type</Item>
<Item Name="nodeFlags">_node_flags</Item>
<Item Name="position">_position</Item>
<Item Name="userData.u64">_userDataU64</Item>
<Item Name="userData.ptr">_userDataPtr</Item>
<Item Name="passData">_passData</Item>
<Item Name="inlineComment">_inlineComment, s8</Item>
<Item Name="user_data.u64">_userDataU64</Item>
<Item Name="user_data.ptr">_userDataPtr</Item>
<Item Name="pass_data">_passData</Item>
<Item Name="inline_comment">_inline_comment, s8</Item>
<Item Name="baseInst" Condition="actsAsInst()">((asmjit::InstNode*)this)-&gt;_baseInst</Item>
<Item Name="opCount" Condition="actsAsInst()">_inst._opCount</Item>
<Item Name="opCapacity" Condition="actsAsInst()">_inst._opCapacity</Item>
<Item Name="opArray" Condition="actsAsInst()">((asmjit::InstNode*)this)-&gt;_opArray, [_inst._opCount]</Item>
<Item Name="op_count" Condition="actsAsInst()">_inst._op_count</Item>
<Item Name="op_capacity" Condition="actsAsInst()">_inst._op_capacity</Item>
<Item Name="op_array" Condition="actsAsInst()">((asmjit::InstNode*)this)-&gt;_opArray, [_inst._op_count]</Item>
<Item Name="sectionId" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_id</Item>
<Item Name="nextSection" Condition="isSection()">((asmjit::SectionNode*)this)-&gt;_nextSection</Item>
<Item Name="section_id" Condition="is_section()">((asmjit::SectionNode*)this)-&gt;_section_id</Item>
<Item Name="nextSection" Condition="is_section()">((asmjit::SectionNode*)this)-&gt;_nextSection</Item>
<Item Name="labelId" Condition="isLabel()">((asmjit::LabelNode*)this)-&gt;_labelId</Item>
<Item Name="label_id" Condition="is_label()">((asmjit::LabelNode*)this)-&gt;_label_id</Item>
<Item Name="alignMode" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignData._alignMode</Item>
<Item Name="alignment" Condition="isAlign()">((asmjit::AlignNode*)this)-&gt;_alignment</Item>
<Item Name="align_mode" Condition="is_align()">((asmjit::AlignNode*)this)-&gt;_align_data._align_mode</Item>
<Item Name="alignment" Condition="is_align()">((asmjit::AlignNode*)this)-&gt;_alignment</Item>
<Item Name="typeId" Condition="isEmbedData()">_embed._typeId, d</Item>
<Item Name="typeSize" Condition="isEmbedData()">_embed._typeSize, d</Item>
<Item Name="itemCount" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_itemCount</Item>
<Item Name="repeatCount" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_repeatCount</Item>
<Item Name="inlineData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_inlineData</Item>
<Item Name="externalData" Condition="isEmbedData()">((asmjit::EmbedDataNode*)this)-&gt;_externalData</Item>
<Item Name="type_id" Condition="is_embed_data()">_embed._type_id, d</Item>
<Item Name="type_size" Condition="is_embed_data()">_embed._type_size, d</Item>
<Item Name="item_count" Condition="is_embed_data()">((asmjit::EmbedDataNode*)this)-&gt;_item_count</Item>
<Item Name="repeat_count" Condition="is_embed_data()">((asmjit::EmbedDataNode*)this)-&gt;_repeat_count</Item>
<Item Name="inlineData" Condition="is_embed_data()">((asmjit::EmbedDataNode*)this)-&gt;_inlineData</Item>
<Item Name="externalData" Condition="is_embed_data()">((asmjit::EmbedDataNode*)this)-&gt;_externalData</Item>
<Item Name="labelId" Condition="isEmbedLabel()">((asmjit::EmbedLabelNode*)this)-&gt;_labelId</Item>
<Item Name="label_id" Condition="is_embed_label()">((asmjit::EmbedLabelNode*)this)-&gt;_label_id</Item>
<Item Name="labelId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_labelId</Item>
<Item Name="baseLabelId" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_baseLabelId</Item>
<Item Name="dataSize" Condition="isEmbedLabelDelta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_dataSize</Item>
<Item Name="label_id" Condition="is_embed_label_delta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_label_id</Item>
<Item Name="base_label_id" Condition="is_embed_label_delta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_base_label_id</Item>
<Item Name="data_size" Condition="is_embed_label_delta()">((asmjit::EmbedLabelDeltaNode*)this)-&gt;_data_size</Item>
<Item Name="constPool" Condition="isConstPool()">((asmjit::ConstPoolNode*)this)-&gt;_constPool</Item>
<Item Name="const_pool" Condition="is_const_pool()">((asmjit::ConstPoolNode*)this)-&gt;_const_pool</Item>
<Item Name="sentinel.sentinelType" Condition="isSentinel()">_sentinel._sentinelType</Item>
<Item Name="sentinel.sentinel_type" Condition="is_sentinel()">_sentinel._sentinel_type</Item>
<Item Name="annotation" Condition="isJump()">((asmjit::JumpNode*)this)-&gt;_annotation</Item>
<Item Name="funcDetail" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_funcDetail</Item>
<Item Name="frame" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_frame</Item>
<Item Name="exitNode" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_exitNode</Item>
<Item Name="end" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_end</Item>
<Item Name="args" Condition="isFunc()">((asmjit::FuncNode*)this)-&gt;_args, [((asmjit::FuncNode*)this)-&gt;_funcDetail._argCount]</Item>
<Item Name="func_detail" Condition="is_func()">((asmjit::FuncNode*)this)-&gt;_func_detail</Item>
<Item Name="frame" Condition="is_func()">((asmjit::FuncNode*)this)-&gt;_frame</Item>
<Item Name="exit_node" Condition="is_func()">((asmjit::FuncNode*)this)-&gt;_exit_node</Item>
<Item Name="end" Condition="is_func()">((asmjit::FuncNode*)this)-&gt;_end</Item>
<Item Name="args" Condition="is_func()">((asmjit::FuncNode*)this)-&gt;_args, [((asmjit::FuncNode*)this)-&gt;_func_detail._arg_count]</Item>
<Item Name="funcDetail" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_funcDetail</Item>
<Item Name="rets" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_rets</Item>
<Item Name="args" Condition="isInvoke()">((asmjit::InvokeNode*)this)-&gt;_args, [((asmjit::InvokeNode*)this)-&gt;_funcDetail._argCount]</Item>
<Item Name="func_detail" Condition="is_invoke()">((asmjit::InvokeNode*)this)-&gt;_func_detail</Item>
<Item Name="rets" Condition="is_invoke()">((asmjit::InvokeNode*)this)-&gt;_rets</Item>
<Item Name="args" Condition="is_invoke()">((asmjit::InvokeNode*)this)-&gt;_args, [((asmjit::InvokeNode*)this)-&gt;_func_detail._arg_count]</Item>
</Expand>
</Type>
</AutoVisualizer>

View File

@@ -11,20 +11,13 @@
//! ### Namespaces
//!
//! - \ref arm - arm namespace provides common functionality for both AArch32 and AArch64 backends.
//! - \ref a32 - a32 namespace provides support for AArch32 architecture. In addition it includes
//! \ref arm namespace, so you can only use a single namespace when targeting AArch32 architecture.
//! - \ref a64 - a64 namespace provides support for AArch64 architecture. In addition it includes
//! \ref arm namespace, so you can only use a single namespace when targeting AArch64 architecture.
//!
//! ### Emitters
//!
//! - AArch32
//! - \ref a32::Assembler - AArch32 assembler (must read, provides examples).
//! - \ref a32::Builder - AArch32 builder.
//! - \ref a32::Compiler - AArch32 compiler.
//! - \ref a32::Emitter - AArch32 emitter (abstract).
//!
//! - AArch64
//! - AArch64:
//! - \ref a64::Assembler - AArch64 assembler (must read, provides examples).
//! - \ref a64::Builder - AArch64 builder.
//! - \ref a64::Compiler - AArch64 compiler.
@@ -32,13 +25,6 @@
//!
//! ### Supported Instructions
//!
//! - AArch32:
//! - Emitters:
//! - \ref a32::EmitterExplicitT - Provides all instructions that use explicit operands, provides also
//! utility functions. The member functions provided are part of all AArch32 emitters.
//! - Instruction representation:
//! - \ref a32::Inst::Id - instruction identifiers.
//!
//! - AArch64:
//! - Emitters:
//! - \ref a64::EmitterExplicitT - Provides all instructions that use explicit operands, provides also
@@ -46,25 +32,19 @@
//! - Instruction representation:
//! - \ref a64::Inst::Id - instruction identifiers.
//!
//! ### Register Operands
//!
//! - AArch32:
//! - \ref a32::Gp - 32-bit general purpose register used by AArch32:
//! - \ref a32::Vec - Vector (SIMD) register.
//! ### ARM Operands
//!
//! - AArch64:
//! - \ref a64::Gp - 32-bit or 64-bit general purpose register used by AArch64:
//! - \ref a64::Vec - Vector (SIMD) register.
//! - \ref a64::Mem - AArch64 memory operand that provides support for all AArch64 addressing features
//! including base, index, pre/post increment, and AArch64 specific shift/extend of memory index.
//!
//! ### Memory Operands
//!
//! - \ref arm::Mem - AArch32/AArch64 memory operand that provides support for all ARM addressing features
//! including base, index, pre/post increment, and ARM-specific shift addressing and index extending.
//!
//! ### Other
//!
//! - \ref arm::Shift - Shift operation and value (both AArch32 and AArch64).
//! - \ref arm::DataType - Data type that is part of an instruction in AArch32 mode.
//! - \ref arm::Utils - Utilities that can help during code generation for AArch32 and AArch64.
#include "core.h"

View File

@@ -18,7 +18,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
//! \addtogroup asmjit_a64
//! \{
static const constexpr ArchTraits a64ArchTraits = {
static const constexpr ArchTraits a64_arch_traits = {
// SP/FP/LR/PC.
Gp::kIdSp, Gp::kIdFp, Gp::kIdLr, 0xFFu,

File diff suppressed because it is too large

View File

@@ -34,22 +34,22 @@ public:
//! \name Emit
//! \{
ASMJIT_API Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) override;
ASMJIT_API Error _emit(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext) override;
//! \}
//! \name Align
//! \{
ASMJIT_API Error align(AlignMode alignMode, uint32_t alignment) override;
ASMJIT_API Error align(AlignMode align_mode, uint32_t alignment) override;
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_attach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_detach(CodeHolder& code) noexcept override;
//! \}
};

View File

@@ -16,8 +16,8 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// =========================================
Builder::Builder(CodeHolder* code) noexcept : BaseBuilder() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
initEmitterFuncs(this);
_arch_mask = uint64_t(1) << uint32_t(Arch::kAArch64);
init_emitter_funcs(this);
if (code) {
code->attach(this);
@@ -28,28 +28,28 @@ Builder::~Builder() noexcept {}
// a64::Builder - Events
// =====================
Error Builder::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error Builder::on_attach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::on_attach(code));
_instructionAlignment = uint8_t(4);
updateEmitterFuncs(this);
_instruction_alignment = uint8_t(4);
update_emitter_funcs(this);
return kErrorOk;
return Error::kOk;
}
Error Builder::onDetach(CodeHolder& code) noexcept {
return Base::onDetach(code);
Error Builder::on_detach(CodeHolder& code) noexcept {
return Base::on_detach(code);
}
// a64::Builder - Finalize
// =======================
Error Builder::finalize() {
ASMJIT_PROPAGATE(runPasses());
ASMJIT_PROPAGATE(run_passes());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
a.add_encoding_options(encoding_options());
a.add_diagnostic_options(diagnostic_options());
return serialize_to(&a);
}
ASMJIT_END_SUB_NAMESPACE

View File

@@ -36,8 +36,8 @@ public:
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_attach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_detach(CodeHolder& code) noexcept override;
//! \}

View File

@@ -17,8 +17,8 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// ==========================================
Compiler::Compiler(CodeHolder* code) noexcept : BaseCompiler() {
_archMask = uint64_t(1) << uint32_t(Arch::kAArch64);
initEmitterFuncs(this);
_arch_mask = uint64_t(1) << uint32_t(Arch::kAArch64);
init_emitter_funcs(this);
if (code) {
code->attach(this);
@@ -29,29 +29,29 @@ Compiler::~Compiler() noexcept {}
// a64::Compiler - Events
// ======================
Error Compiler::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error err = addPassT<ARMRAPass>();
Error Compiler::on_attach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::on_attach(code));
Error err = add_pass<ARMRAPass>();
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
on_detach(code);
return err;
}
_instructionAlignment = uint8_t(4);
updateEmitterFuncs(this);
_instruction_alignment = uint8_t(4);
update_emitter_funcs(this);
return kErrorOk;
return Error::kOk;
}
Error Compiler::onDetach(CodeHolder& code) noexcept {
return Base::onDetach(code);
Error Compiler::on_detach(CodeHolder& code) noexcept {
return Base::on_detach(code);
}
Error Compiler::onReinit(CodeHolder& code) noexcept {
Error err = Base::onReinit(code);
if (err == kErrorOk) {
err = addPassT<ARMRAPass>();
Error Compiler::on_reinit(CodeHolder& code) noexcept {
Error err = Base::on_reinit(code);
if (err == Error::kOk) {
err = add_pass<ARMRAPass>();
}
return err;
}
@@ -60,11 +60,11 @@ Error Compiler::onReinit(CodeHolder& code) noexcept {
// ========================
Error Compiler::finalize() {
ASMJIT_PROPAGATE(runPasses());
ASMJIT_PROPAGATE(run_passes());
Assembler a(_code);
a.addEncodingOptions(encodingOptions());
a.addDiagnosticOptions(diagnosticOptions());
return serializeTo(&a);
a.add_encoding_options(encoding_options());
a.add_diagnostic_options(diagnostic_options());
return serialize_to(&a);
}
ASMJIT_END_SUB_NAMESPACE

View File

@@ -39,96 +39,83 @@ public:
//! \cond INTERNAL
template<typename RegT, typename Type>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type) {
ASMJIT_INLINE_NODEBUG RegT _new_reg_internal(const Type& type) {
RegT reg(Globals::NoInit);
_newReg(&reg, type, nullptr);
_new_reg(Out<Reg>{reg}, type, nullptr);
return reg;
}
template<typename RegT, typename Type>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type, const char* s) {
ASMJIT_INLINE_NODEBUG RegT _new_reg_internal(const Type& type, const char* s) {
#ifndef ASMJIT_NO_LOGGING
RegT reg(Globals::NoInit);
_newReg(&reg, type, s);
_new_reg(Out<Reg>{reg}, type, s);
return reg;
#else
DebugUtils::unused(s);
return _newRegInternal<RegT>(type);
Support::maybe_unused(s);
return _new_reg_internal<RegT>(type);
#endif
}
template<typename RegT, typename Type, typename... Args>
ASMJIT_INLINE_NODEBUG RegT _newRegInternal(const Type& type, const char* s, Args&&... args) {
ASMJIT_INLINE_NODEBUG RegT _new_reg_internal(const Type& type, const char* s, Args&&... args) {
#ifndef ASMJIT_NO_LOGGING
RegT reg(Globals::NoInit);
_newRegFmt(&reg, type, s, std::forward<Args>(args)...);
_new_reg_fmt(Out<Reg>{reg}, type, s, std::forward<Args>(args)...);
return reg;
#else
DebugUtils::unused(s, std::forward<Args>(args)...);
return _newRegInternal<RegT>(type);
Support::maybe_unused(s, std::forward<Args>(args)...);
return _new_reg_internal<RegT>(type);
#endif
}
//! \endcond
template<typename RegT, typename... Args>
ASMJIT_INLINE_NODEBUG RegT newSimilarReg(const RegT& ref, Args&&... args) {
return _newRegInternal<RegT>(ref, std::forward<Args>(args)...);
ASMJIT_INLINE_NODEBUG RegT new_similar_reg(const RegT& ref, Args&&... args) {
return _new_reg_internal<RegT>(ref, std::forward<Args>(args)...);
}
template<typename... Args>
ASMJIT_INLINE_NODEBUG Reg newReg(TypeId typeId, Args&&... args) { return _newRegInternal<Reg>(typeId, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Reg new_reg(TypeId type_id, Args&&... args) { return _new_reg_internal<Reg>(type_id, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGp(TypeId typeId, Args&&... args) { return _newRegInternal<Gp>(typeId, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Gp new_gp(TypeId type_id, Args&&... args) { return _new_reg_internal<Gp>(type_id, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVec(TypeId typeId, Args&&... args) { return _newRegInternal<Vec>(typeId, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Gp new_gp32(Args&&... args) { return _new_reg_internal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp new_gp64(Args&&... args) { return _new_reg_internal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt32, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Gp new_gpw(Args&&... args) { return _new_reg_internal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUInt32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Gp new_gpx(Args&&... args) { return _new_reg_internal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp new_gpz(Args&&... args) { return _new_reg_internal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp new_gp_ptr(Args&&... args) { return _new_reg_internal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUInt64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Vec new_vec(TypeId type_id, Args&&... args) { return _new_reg_internal<Vec>(type_id, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newUIntPtr(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Vec new_vec_s(Args&&... args) { return _new_reg_internal<Vec>(TypeId::kFloat32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGp32(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGp64(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Vec new_vec_d(Args&&... args) { return _new_reg_internal<Vec>(TypeId::kFloat64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpw(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpx(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUInt64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Gp newGpz(Args&&... args) { return _newRegInternal<Gp>(TypeId::kUIntPtr, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecS(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat32, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecD(Args&&... args) { return _newRegInternal<Vec>(TypeId::kFloat64, std::forward<Args>(args)...); }
template<typename... Args>
ASMJIT_INLINE_NODEBUG Vec newVecQ(Args&&... args) { return _newRegInternal<Vec>(TypeId::kUInt8x16, std::forward<Args>(args)...); }
ASMJIT_INLINE_NODEBUG Vec new_vec_q(Args&&... args) { return _new_reg_internal<Vec>(TypeId::kUInt8x16, std::forward<Args>(args)...); }
//! \}
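
Taken together, the renamed factories encode the register width in the name (gp32/gp64, gpw/gpx/gpz, vec_s/vec_d/vec_q). A hedged usage sketch, assuming a live a64::Compiler `cc`:

    a64::Gp counter = cc.new_gp32("counter");          // 32-bit GP (TypeId::kUInt32).
    a64::Gp base    = cc.new_gp_ptr("base");           // Pointer-sized GP (TypeId::kUIntPtr).
    a64::Vec acc    = cc.new_vec_q("acc");             // 128-bit vector (TypeId::kUInt8x16).
    a64::Vec tmp    = cc.new_similar_reg(acc, "tmp");  // Same type/signature as 'acc'.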
//! \name Stack
//! \{
//! Creates a new memory chunk allocated on the current function's stack.
ASMJIT_INLINE_NODEBUG Mem newStack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
//! Creates a new stack slot and returns a \ref Mem operand that can be used to address it.
ASMJIT_INLINE_NODEBUG Mem new_stack(uint32_t size, uint32_t alignment, const char* name = nullptr) {
Mem m(Globals::NoInit);
_newStack(&m, size, alignment, name);
_new_stack(Out<BaseMem>(m), size, alignment, name);
return m;
}
@@ -138,38 +125,38 @@ public:
//! \{
//! Put data to a constant-pool and get a memory reference to it.
ASMJIT_INLINE_NODEBUG Mem newConst(ConstPoolScope scope, const void* data, size_t size) {
ASMJIT_INLINE_NODEBUG Mem new_const(ConstPoolScope scope, const void* data, size_t size) {
Mem m(Globals::NoInit);
_newConst(&m, scope, data, size);
_new_const(Out<BaseMem>(m), scope, data, size);
return m;
}
//! Put a BYTE `val` to a constant-pool (8 bits).
ASMJIT_INLINE_NODEBUG Mem newByteConst(ConstPoolScope scope, uint8_t val) noexcept { return newConst(scope, &val, 1); }
ASMJIT_INLINE_NODEBUG Mem new_byte_const(ConstPoolScope scope, uint8_t val) noexcept { return new_const(scope, &val, 1); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newHWordConst(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
ASMJIT_INLINE_NODEBUG Mem new_half_const(ConstPoolScope scope, uint16_t val) noexcept { return new_const(scope, &val, 2); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newWordConst(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
ASMJIT_INLINE_NODEBUG Mem new_word_const(ConstPoolScope scope, uint32_t val) noexcept { return new_const(scope, &val, 4); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newDWordConst(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
ASMJIT_INLINE_NODEBUG Mem new_dword_const(ConstPoolScope scope, uint64_t val) noexcept { return new_const(scope, &val, 8); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newInt16Const(ConstPoolScope scope, int16_t val) noexcept { return newConst(scope, &val, 2); }
ASMJIT_INLINE_NODEBUG Mem new_int16_const(ConstPoolScope scope, int16_t val) noexcept { return new_const(scope, &val, 2); }
//! Put a HWORD `val` to a constant-pool (16 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt16Const(ConstPoolScope scope, uint16_t val) noexcept { return newConst(scope, &val, 2); }
ASMJIT_INLINE_NODEBUG Mem new_uint16_const(ConstPoolScope scope, uint16_t val) noexcept { return new_const(scope, &val, 2); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newInt32Const(ConstPoolScope scope, int32_t val) noexcept { return newConst(scope, &val, 4); }
ASMJIT_INLINE_NODEBUG Mem new_int32_const(ConstPoolScope scope, int32_t val) noexcept { return new_const(scope, &val, 4); }
//! Put a WORD `val` to a constant-pool (32 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt32Const(ConstPoolScope scope, uint32_t val) noexcept { return newConst(scope, &val, 4); }
ASMJIT_INLINE_NODEBUG Mem new_uint32_const(ConstPoolScope scope, uint32_t val) noexcept { return new_const(scope, &val, 4); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newInt64Const(ConstPoolScope scope, int64_t val) noexcept { return newConst(scope, &val, 8); }
ASMJIT_INLINE_NODEBUG Mem new_int64_const(ConstPoolScope scope, int64_t val) noexcept { return new_const(scope, &val, 8); }
//! Put a DWORD `val` to a constant-pool (64 bits).
ASMJIT_INLINE_NODEBUG Mem newUInt64Const(ConstPoolScope scope, uint64_t val) noexcept { return newConst(scope, &val, 8); }
ASMJIT_INLINE_NODEBUG Mem new_uint64_const(ConstPoolScope scope, uint64_t val) noexcept { return new_const(scope, &val, 8); }
//! Put a SP-FP `val` to a constant-pool.
ASMJIT_INLINE_NODEBUG Mem newFloatConst(ConstPoolScope scope, float val) noexcept { return newConst(scope, &val, 4); }
ASMJIT_INLINE_NODEBUG Mem new_float_const(ConstPoolScope scope, float val) noexcept { return new_const(scope, &val, 4); }
//! Put a DP-FP `val` to a constant-pool.
ASMJIT_INLINE_NODEBUG Mem newDoubleConst(ConstPoolScope scope, double val) noexcept { return newConst(scope, &val, 8); }
ASMJIT_INLINE_NODEBUG Mem new_double_const(ConstPoolScope scope, double val) noexcept { return new_const(scope, &val, 8); }
//! \}
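
For example, embedding a 32-bit float constant and loading it (a hedged sketch; ConstPoolScope::kLocal as in upstream AsmJit):

    Mem one    = cc.new_float_const(ConstPoolScope::kLocal, 1.0f); // 4-byte pool entry.
    a64::Vec v = cc.new_vec_s("v");                                // 32-bit vector register.
    cc.ldr(v, one);                                                // Load it from the pool.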
@@ -177,7 +164,7 @@ public:
//! \{
//! Force the compiler not to follow the conditional or unconditional jump.
ASMJIT_INLINE_NODEBUG Compiler& unfollow() noexcept { _instOptions |= InstOptions::kUnfollow; return *this; }
ASMJIT_INLINE_NODEBUG Compiler& unfollow() noexcept { _inst_options |= InstOptions::kUnfollow; return *this; }
//! \}
@@ -189,7 +176,7 @@ public:
//! \note At the moment this instruction is only useful for loading a stack-allocated address into a GP register
//! for further use. It makes very little sense to use it for anything else. The semantics of this instruction
//! are the same as the X86 `LEA` (load effective address) instruction.
ASMJIT_INLINE_NODEBUG Error loadAddressOf(const Gp& o0, const Mem& o1) { return _emitter()->_emitI(Inst::kIdAdr, o0, o1); }
ASMJIT_INLINE_NODEBUG Error load_address_of(const Gp& o0, const Mem& o1) { return _emitter()->_emitI(Inst::kIdAdr, o0, o1); }
//! \}
@@ -197,8 +184,8 @@ public:
//! \{
//! Invoke a function call without `target` type enforcement.
ASMJIT_INLINE_NODEBUG Error invoke_(InvokeNode** out, const Operand_& target, const FuncSignature& signature) {
return addInvokeNode(out, Inst::kIdBlr, target, signature);
ASMJIT_INLINE_NODEBUG Error invoke_(Out<InvokeNode*> out, const Operand_& target, const FuncSignature& signature) {
return add_invoke_node(out, Inst::kIdBlr, target, signature);
}
//! Invoke a function call of the given `target` and `signature` and store the added node in `out`.
@@ -206,22 +193,22 @@ public:
//! Creates a new \ref InvokeNode, initializes all the necessary members to match the given function `signature`,
//! adds the node to the compiler, and stores its pointer in `out`. The operation is atomic: if anything fails,
//! nullptr is stored in `out` and an error code is returned.
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Gp& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
ASMJIT_INLINE_NODEBUG Error invoke(Out<InvokeNode*> out, const Gp& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Mem& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
ASMJIT_INLINE_NODEBUG Error invoke(Out<InvokeNode*> out, const Mem& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Label& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
ASMJIT_INLINE_NODEBUG Error invoke(Out<InvokeNode*> out, const Label& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, const Imm& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
ASMJIT_INLINE_NODEBUG Error invoke(Out<InvokeNode*> out, const Imm& target, const FuncSignature& signature) { return invoke_(out, target, signature); }
//! \overload
ASMJIT_INLINE_NODEBUG Error invoke(InvokeNode** out, uint64_t target, const FuncSignature& signature) { return invoke_(out, Imm(int64_t(target)), signature); }
ASMJIT_INLINE_NODEBUG Error invoke(Out<InvokeNode*> out, uint64_t target, const FuncSignature& signature) { return invoke_(out, Imm(int64_t(target)), signature); }
//! Return.
ASMJIT_INLINE_NODEBUG Error ret() { return addRet(Operand(), Operand()); }
ASMJIT_INLINE_NODEBUG Error ret() { return add_ret(Operand(), Operand()); }
//! \overload
ASMJIT_INLINE_NODEBUG Error ret(const Reg& o0) { return addRet(o0, Operand()); }
ASMJIT_INLINE_NODEBUG Error ret(const Reg& o0) { return add_ret(o0, Operand()); }
//! \overload
ASMJIT_INLINE_NODEBUG Error ret(const Reg& o0, const Reg& o1) { return addRet(o0, o1); }
ASMJIT_INLINE_NODEBUG Error ret(const Reg& o0, const Reg& o1) { return add_ret(o0, o1); }
//! \}
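
A hedged call-site sketch of the new Out<InvokeNode*> signature; set_arg/set_ret are assumed renames of setArg/setRet, and some_fn/arg_reg/ret_reg are placeholders:

    InvokeNode* call;
    cc.invoke(Out<InvokeNode*>{call},
              uint64_t(uintptr_t(&some_fn)),      // Uses the uint64_t overload above.
              FuncSignature::build<int, int>());
    call->set_arg(0, arg_reg);                    // Assumed rename of setArg().
    call->set_ret(0, ret_reg);                    // Assumed rename of setRet().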
@@ -231,16 +218,16 @@ public:
using EmitterExplicitT<Compiler>::br;
//! Adds a jump to the given `target` with the provided jump `annotation`.
ASMJIT_INLINE_NODEBUG Error br(const Reg& target, JumpAnnotation* annotation) { return emitAnnotatedJump(Inst::kIdBr, target, annotation); }
ASMJIT_INLINE_NODEBUG Error br(const Reg& target, JumpAnnotation* annotation) { return emit_annotated_jump(Inst::kIdBr, target, annotation); }
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error onReinit(CodeHolder& code) noexcept override;
ASMJIT_API Error on_attach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_detach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_reinit(CodeHolder& code) noexcept override;
//! \}

@@ -21,22 +21,22 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::EmitHelper - Emit Operations
// =================================
ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
ASMJIT_FAVOR_SIZE Error EmitHelper::emit_reg_move(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment) {
const Operand_& src_, TypeId type_id, const char* comment) {
Emitter* emitter = _emitter->as<Emitter>();
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(typeId) && !TypeUtils::isAbstract(typeId));
ASMJIT_ASSERT(TypeUtils::is_valid(type_id) && !TypeUtils::is_abstract(type_id));
emitter->setInlineComment(comment);
emitter->set_inline_comment(comment);
if (dst_.isReg() && src_.isMem()) {
if (dst_.is_reg() && src_.is_mem()) {
Reg dst(dst_.as<Reg>());
Mem src(src_.as<Mem>());
switch (typeId) {
switch (type_id) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->ldrb(dst.as<Gp>(), src);
@@ -54,15 +54,15 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
return emitter->ldr(dst.as<Gp>().x(), src);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
if (TypeUtils::is_float32(type_id) || TypeUtils::is_vec32(type_id)) {
return emitter->ldr(dst.as<Vec>().s(), src);
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
if (TypeUtils::is_float64(type_id) || TypeUtils::is_vec64(type_id)) {
return emitter->ldr(dst.as<Vec>().d(), src);
}
if (TypeUtils::isVec128(typeId)) {
if (TypeUtils::is_vec128(type_id)) {
return emitter->ldr(dst.as<Vec>().q(), src);
}
@@ -71,11 +71,11 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
}
}
if (dst_.isMem() && src_.isReg()) {
if (dst_.is_mem() && src_.is_reg()) {
Mem dst(dst_.as<Mem>());
Reg src(src_.as<Reg>());
switch (typeId) {
switch (type_id) {
case TypeId::kInt8:
case TypeId::kUInt8:
return emitter->strb(src.as<Gp>(), dst);
@@ -93,15 +93,15 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
return emitter->str(src.as<Gp>().x(), dst);
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
if (TypeUtils::is_float32(type_id) || TypeUtils::is_vec32(type_id)) {
return emitter->str(src.as<Vec>().s(), dst);
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
if (TypeUtils::is_float64(type_id) || TypeUtils::is_vec64(type_id)) {
return emitter->str(src.as<Vec>().d(), dst);
}
if (TypeUtils::isVec128(typeId)) {
if (TypeUtils::is_vec128(type_id)) {
return emitter->str(src.as<Vec>().q(), dst);
}
@@ -110,11 +110,11 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
}
}
if (dst_.isReg() && src_.isReg()) {
if (dst_.is_reg() && src_.is_reg()) {
Reg dst(dst_.as<Reg>());
Reg src(src_.as<Reg>());
switch (typeId) {
switch (type_id) {
case TypeId::kInt8:
case TypeId::kUInt8:
case TypeId::kInt16:
@@ -126,15 +126,15 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
return emitter->mov(dst.as<Gp>().x(), src.as<Gp>().x());
default: {
if (TypeUtils::isFloat32(typeId) || TypeUtils::isVec32(typeId)) {
if (TypeUtils::is_float32(type_id) || TypeUtils::is_vec32(type_id)) {
return emitter->fmov(dst.as<Vec>().s(), src.as<Vec>().s());
}
if (TypeUtils::isFloat64(typeId) || TypeUtils::isVec64(typeId)) {
if (TypeUtils::is_float64(type_id) || TypeUtils::is_vec64(type_id)) {
return emitter->mov(dst.as<Vec>().b8(), src.as<Vec>().b8());
}
if (TypeUtils::isVec128(typeId)) {
if (TypeUtils::is_vec128(type_id)) {
return emitter->mov(dst.as<Vec>().b16(), src.as<Vec>().b16());
}
@@ -143,101 +143,101 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitRegMove(
}
}
emitter->setInlineComment(nullptr);
return DebugUtils::errored(kErrorInvalidState);
emitter->set_inline_comment(nullptr);
return make_error(Error::kInvalidState);
}
Error EmitHelper::emitRegSwap(
Error EmitHelper::emit_reg_swap(
const Reg& a,
const Reg& b, const char* comment) {
DebugUtils::unused(a, b, comment);
return DebugUtils::errored(kErrorInvalidState);
Support::maybe_unused(a, b, comment);
return make_error(Error::kInvalidState);
}
// TODO: [ARM] EmitArgMove is unfinished.
Error EmitHelper::emitArgMove(
const Reg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment) {
Error EmitHelper::emit_arg_move(
const Reg& dst_, TypeId dst_type_id,
const Operand_& src_, TypeId src_type_id, const char* comment) {
// Deduce optional `dstTypeId`, which may be `TypeId::kVoid` in some cases.
if (dstTypeId == TypeId::kVoid) {
dstTypeId = RegUtils::typeIdOf(dst_.regType());
// Deduce optional `dst_type_id`, which may be `TypeId::kVoid` in some cases.
if (dst_type_id == TypeId::kVoid) {
dst_type_id = RegUtils::type_id_of(dst_.reg_type());
}
// Invalid or abstract TypeIds are not allowed.
ASMJIT_ASSERT(TypeUtils::isValid(dstTypeId) && !TypeUtils::isAbstract(dstTypeId));
ASMJIT_ASSERT(TypeUtils::isValid(srcTypeId) && !TypeUtils::isAbstract(srcTypeId));
ASMJIT_ASSERT(TypeUtils::is_valid(dst_type_id) && !TypeUtils::is_abstract(dst_type_id));
ASMJIT_ASSERT(TypeUtils::is_valid(src_type_id) && !TypeUtils::is_abstract(src_type_id));
Reg dst(dst_.as<Reg>());
Operand src(src_);
uint32_t dstSize = TypeUtils::sizeOf(dstTypeId);
uint32_t srcSize = TypeUtils::sizeOf(srcTypeId);
uint32_t dst_size = TypeUtils::size_of(dst_type_id);
uint32_t src_size = TypeUtils::size_of(src_type_id);
if (TypeUtils::isInt(dstTypeId)) {
if (TypeUtils::isInt(srcTypeId)) {
uint32_t x = uint32_t(dstSize == 8);
if (TypeUtils::is_int(dst_type_id)) {
if (TypeUtils::is_int(src_type_id)) {
uint32_t x = uint32_t(dst_size == 8);
dst.setSignature(OperandSignature{x ? RegTraits<RegType::kGp64>::kSignature : RegTraits<RegType::kGp32>::kSignature});
_emitter->setInlineComment(comment);
dst.set_signature(OperandSignature{x ? RegTraits<RegType::kGp64>::kSignature : RegTraits<RegType::kGp32>::kSignature});
_emitter->set_inline_comment(comment);
if (src.isReg()) {
src.setSignature(dst.signature());
if (src.is_reg()) {
src.set_signature(dst.signature());
return _emitter->emit(Inst::kIdMov, dst, src);
}
else if (src.isMem()) {
InstId instId = Inst::kIdNone;
switch (srcTypeId) {
case TypeId::kInt8: instId = Inst::kIdLdrsb; break;
case TypeId::kUInt8: instId = Inst::kIdLdrb; break;
case TypeId::kInt16: instId = Inst::kIdLdrsh; break;
case TypeId::kUInt16: instId = Inst::kIdLdrh; break;
case TypeId::kInt32: instId = x ? Inst::kIdLdrsw : Inst::kIdLdr; break;
case TypeId::kUInt32: instId = Inst::kIdLdr; break;
case TypeId::kInt64: instId = Inst::kIdLdr; break;
case TypeId::kUInt64: instId = Inst::kIdLdr; break;
else if (src.is_mem()) {
InstId inst_id = Inst::kIdNone;
switch (src_type_id) {
case TypeId::kInt8: inst_id = Inst::kIdLdrsb; break;
case TypeId::kUInt8: inst_id = Inst::kIdLdrb; break;
case TypeId::kInt16: inst_id = Inst::kIdLdrsh; break;
case TypeId::kUInt16: inst_id = Inst::kIdLdrh; break;
case TypeId::kInt32: inst_id = x ? Inst::kIdLdrsw : Inst::kIdLdr; break;
case TypeId::kUInt32: inst_id = Inst::kIdLdr; break;
case TypeId::kInt64: inst_id = Inst::kIdLdr; break;
case TypeId::kUInt64: inst_id = Inst::kIdLdr; break;
default:
return DebugUtils::errored(kErrorInvalidState);
return make_error(Error::kInvalidState);
}
return _emitter->emit(instId, dst, src);
return _emitter->emit(inst_id, dst, src);
}
}
}
if (TypeUtils::isFloat(dstTypeId) || TypeUtils::isVec(dstTypeId)) {
if (TypeUtils::isFloat(srcTypeId) || TypeUtils::isVec(srcTypeId)) {
switch (srcSize) {
case 2: dst.as<Vec>().setSignature(RegTraits<RegType::kVec16>::kSignature); break;
case 4: dst.as<Vec>().setSignature(RegTraits<RegType::kVec32>::kSignature); break;
case 8: dst.as<Vec>().setSignature(RegTraits<RegType::kVec64>::kSignature); break;
case 16: dst.as<Vec>().setSignature(RegTraits<RegType::kVec128>::kSignature); break;
if (TypeUtils::is_float(dst_type_id) || TypeUtils::is_vec(dst_type_id)) {
if (TypeUtils::is_float(src_type_id) || TypeUtils::is_vec(src_type_id)) {
switch (src_size) {
case 2: dst.as<Vec>().set_signature(RegTraits<RegType::kVec16>::kSignature); break;
case 4: dst.as<Vec>().set_signature(RegTraits<RegType::kVec32>::kSignature); break;
case 8: dst.as<Vec>().set_signature(RegTraits<RegType::kVec64>::kSignature); break;
case 16: dst.as<Vec>().set_signature(RegTraits<RegType::kVec128>::kSignature); break;
default:
return DebugUtils::errored(kErrorInvalidState);
return make_error(Error::kInvalidState);
}
_emitter->setInlineComment(comment);
_emitter->set_inline_comment(comment);
if (src.isReg()) {
InstId instId = srcSize <= 4 ? Inst::kIdFmov_v : Inst::kIdMov_v;
src.setSignature(dst.signature());
return _emitter->emit(instId, dst, src);
if (src.is_reg()) {
InstId inst_id = src_size <= 4 ? Inst::kIdFmov_v : Inst::kIdMov_v;
src.set_signature(dst.signature());
return _emitter->emit(inst_id, dst, src);
}
else if (src.isMem()) {
else if (src.is_mem()) {
return _emitter->emit(Inst::kIdLdr_v, dst, src);
}
}
}
return DebugUtils::errored(kErrorInvalidState);
return make_error(Error::kInvalidState);
}
// a64::EmitHelper - Emit Prolog & Epilog
// ======================================
struct LoadStoreInstructions {
InstId singleInstId;
InstId pairInstId;
InstId single_inst_id;
InstId pair_inst_id;
};
struct PrologEpilogInfo {
@@ -248,122 +248,122 @@ struct PrologEpilogInfo {
struct GroupData {
RegPair pairs[16];
uint32_t pairCount;
uint32_t pair_count;
};
Support::Array<GroupData, 2> groups;
uint32_t sizeTotal;
uint32_t size_total;
Error init(const FuncFrame& frame) noexcept {
uint32_t offset = 0;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
for (RegGroup group : Support::enumerate(RegGroup::kGp, RegGroup::kVec)) {
GroupData& data = groups[group];
uint32_t n = 0;
uint32_t pairCount = 0;
uint32_t pair_count = 0;
RegPair* pairs = data.pairs;
uint32_t slotSize = frame.saveRestoreRegSize(group);
uint32_t savedRegs = frame.savedRegs(group);
uint32_t slot_size = frame.save_restore_reg_size(group);
RegMask saved_regs = frame.saved_regs(group);
if (group == RegGroup::kGp && frame.hasPreservedFP()) {
if (group == RegGroup::kGp && frame.has_preserved_fp()) {
// Must be at the beginning of the push/pop sequence.
ASMJIT_ASSERT(pairCount == 0);
ASMJIT_ASSERT(pair_count == 0);
pairs[0].offset = uint16_t(offset);
pairs[0].ids[0] = Gp::kIdFp;
pairs[0].ids[1] = Gp::kIdLr;
offset += slotSize * 2;
pairCount++;
offset += slot_size * 2;
pair_count++;
savedRegs &= ~Support::bitMask(Gp::kIdFp, Gp::kIdLr);
saved_regs &= ~Support::bit_mask<RegMask>(Gp::kIdFp, Gp::kIdLr);
}
Support::BitWordIterator<uint32_t> it(savedRegs);
while (it.hasNext()) {
pairs[pairCount].ids[n] = uint8_t(it.next());
Support::BitWordIterator<uint32_t> it(saved_regs);
while (it.has_next()) {
pairs[pair_count].ids[n] = uint8_t(it.next());
if (++n == 2) {
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
pairs[pair_count].offset = uint16_t(offset);
offset += slot_size * 2;
n = 0;
pairCount++;
pair_count++;
}
}
if (n == 1) {
pairs[pairCount].ids[1] = uint8_t(Reg::kIdBad);
pairs[pairCount].offset = uint16_t(offset);
offset += slotSize * 2;
pairCount++;
pairs[pair_count].ids[1] = uint8_t(Reg::kIdBad);
pairs[pair_count].offset = uint16_t(offset);
offset += slot_size * 2;
pair_count++;
}
data.pairCount = pairCount;
data.pair_count = pair_count;
}
sizeTotal = offset;
return kErrorOk;
size_total = offset;
return Error::kOk;
}
};
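
To make the pairing concrete, a hedged illustration of what init() computes for a frame that preserves FP/LR plus x19-x21 (default 8-byte GP slot size, so each pair occupies 16 bytes):

    // pairs[0] = {fp,  lr },  offset  0   // Always first when has_preserved_fp().
    // pairs[1] = {x19, x20},  offset 16
    // pairs[2] = {x21, bad},  offset 32   // Odd register out: ids[1] = Reg::kIdBad.
    // size_total = 48
    //
    // emit_prolog() below stores pairs[0] with pre-index write-back, i.e.
    // 'stp x29, x30, [sp, #-48]!', and the remaining pairs at fixed offsets.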
ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
ASMJIT_FAVOR_SIZE Error EmitHelper::emit_prolog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
static const Support::Array<Reg, 2> group_regs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> group_insts = {{
{ Inst::kIdStr , Inst::kIdStp },
{ Inst::kIdStr_v, Inst::kIdStp_v }
}};
// Emit: 'bti {jc}' (indirect branch protection).
if (frame.hasIndirectBranchProtection()) {
if (frame.has_indirect_branch_protection()) {
ASMJIT_PROPAGATE(emitter->bti(Predicate::BTI::kJC));
}
uint32_t adjustInitialOffset = pei.sizeTotal;
uint32_t adjust_initial_offset = pei.size_total;
for (RegGroup group : Support::EnumValues<RegGroup, RegGroup::kGp, RegGroup::kVec>{}) {
for (RegGroup group : Support::enumerate(RegGroup::kGp, RegGroup::kVec)) {
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
uint32_t pair_count = data.pair_count;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Reg regs[2] = { group_regs[group], group_regs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
for (uint32_t i = 0; i < pairCount; i++) {
const LoadStoreInstructions& insts = group_insts[group];
for (uint32_t i = 0; i < pair_count; i++) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
regs[0].set_id(pair.ids[0]);
regs[1].set_id(pair.ids[1]);
mem.set_offset_lo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(-int(adjustInitialOffset));
mem.makePreIndex();
if (pair.offset == 0 && adjust_initial_offset) {
mem.set_offset(-int(adjust_initial_offset));
mem.make_pre_index();
}
if (pair.ids[1] == Reg::kIdBad) {
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
ASMJIT_PROPAGATE(emitter->emit(insts.single_inst_id, regs[0], mem));
}
else {
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
ASMJIT_PROPAGATE(emitter->emit(insts.pair_inst_id, regs[0], regs[1], mem));
}
mem.resetOffsetMode();
mem.reset_offset_mode();
if (i == 0 && frame.hasPreservedFP()) {
if (i == 0 && frame.has_preserved_fp()) {
ASMJIT_PROPAGATE(emitter->mov(x29, sp));
}
}
}
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (frame.has_stack_adjustment()) {
uint32_t adj = frame.stack_adjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj));
}
@@ -373,30 +373,30 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitProlog(const FuncFrame& frame) {
ASMJIT_PROPAGATE(emitter->sub(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
return make_error(Error::kInvalidState);
}
}
return kErrorOk;
return Error::kOk;
}
// TODO: [ARM] Emit epilog.
ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
ASMJIT_FAVOR_SIZE Error EmitHelper::emit_epilog(const FuncFrame& frame) {
Emitter* emitter = _emitter->as<Emitter>();
PrologEpilogInfo pei;
ASMJIT_PROPAGATE(pei.init(frame));
static const Support::Array<Reg, 2> groupRegs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> groupInsts = {{
static const Support::Array<Reg, 2> group_regs = {{ x0, d0 }};
static const Support::Array<LoadStoreInstructions, 2> group_insts = {{
{ Inst::kIdLdr , Inst::kIdLdp },
{ Inst::kIdLdr_v, Inst::kIdLdp_v }
}};
uint32_t adjustInitialOffset = pei.sizeTotal;
uint32_t adjust_initial_offset = pei.size_total;
if (frame.hasStackAdjustment()) {
uint32_t adj = frame.stackAdjustment();
if (frame.has_stack_adjustment()) {
uint32_t adj = frame.stack_adjustment();
if (adj <= 0xFFFu) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj));
}
@@ -405,70 +405,70 @@ ASMJIT_FAVOR_SIZE Error EmitHelper::emitEpilog(const FuncFrame& frame) {
ASMJIT_PROPAGATE(emitter->add(sp, sp, adj & 0xFFF000u));
}
else {
return DebugUtils::errored(kErrorInvalidState);
return make_error(Error::kInvalidState);
}
}
for (int g = 1; g >= 0; g--) {
RegGroup group = RegGroup(g);
const PrologEpilogInfo::GroupData& data = pei.groups[group];
uint32_t pairCount = data.pairCount;
uint32_t pair_count = data.pair_count;
Reg regs[2] = { groupRegs[group], groupRegs[group] };
Reg regs[2] = { group_regs[group], group_regs[group] };
Mem mem = ptr(sp);
const LoadStoreInstructions& insts = groupInsts[group];
const LoadStoreInstructions& insts = group_insts[group];
for (int i = int(pairCount) - 1; i >= 0; i--) {
for (int i = int(pair_count) - 1; i >= 0; i--) {
const PrologEpilogInfo::RegPair& pair = data.pairs[i];
regs[0].setId(pair.ids[0]);
regs[1].setId(pair.ids[1]);
mem.setOffsetLo32(pair.offset);
regs[0].set_id(pair.ids[0]);
regs[1].set_id(pair.ids[1]);
mem.set_offset_lo32(pair.offset);
if (pair.offset == 0 && adjustInitialOffset) {
mem.setOffset(int(adjustInitialOffset));
mem.makePostIndex();
if (pair.offset == 0 && adjust_initial_offset) {
mem.set_offset(int(adjust_initial_offset));
mem.make_post_index();
}
if (pair.ids[1] == Reg::kIdBad) {
ASMJIT_PROPAGATE(emitter->emit(insts.singleInstId, regs[0], mem));
ASMJIT_PROPAGATE(emitter->emit(insts.single_inst_id, regs[0], mem));
}
else {
ASMJIT_PROPAGATE(emitter->emit(insts.pairInstId, regs[0], regs[1], mem));
ASMJIT_PROPAGATE(emitter->emit(insts.pair_inst_id, regs[0], regs[1], mem));
}
mem.resetOffsetMode();
mem.reset_offset_mode();
}
}
ASMJIT_PROPAGATE(emitter->ret(x30));
return kErrorOk;
return Error::kOk;
}
static Error ASMJIT_CDECL Emitter_emitProlog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitProlog(frame);
EmitHelper emit_helper(emitter);
return emit_helper.emit_prolog(frame);
}
static Error ASMJIT_CDECL Emitter_emitEpilog(BaseEmitter* emitter, const FuncFrame& frame) {
EmitHelper emitHelper(emitter);
return emitHelper.emitEpilog(frame);
EmitHelper emit_helper(emitter);
return emit_helper.emit_epilog(frame);
}
static Error ASMJIT_CDECL Emitter_emitArgsAssignment(BaseEmitter* emitter, const FuncFrame& frame, const FuncArgsAssignment& args) {
EmitHelper emitHelper(emitter);
return emitHelper.emitArgsAssignment(frame, args);
EmitHelper emit_helper(emitter);
return emit_helper.emit_args_assignment(frame, args);
}
void initEmitterFuncs(BaseEmitter* emitter) {
emitter->_funcs.emitProlog = Emitter_emitProlog;
emitter->_funcs.emitEpilog = Emitter_emitEpilog;
emitter->_funcs.emitArgsAssignment = Emitter_emitArgsAssignment;
void init_emitter_funcs(BaseEmitter* emitter) {
emitter->_funcs.emit_prolog = Emitter_emitProlog;
emitter->_funcs.emit_epilog = Emitter_emitEpilog;
emitter->_funcs.emit_args_assignment = Emitter_emitArgsAssignment;
#ifndef ASMJIT_NO_LOGGING
emitter->_funcs.formatInstruction = FormatterInternal::formatInstruction;
emitter->_funcs.format_instruction = FormatterInternal::format_instruction;
#endif
#ifndef ASMJIT_NO_VALIDATION

@@ -26,26 +26,30 @@ public:
ASMJIT_INLINE_NODEBUG virtual ~EmitHelper() noexcept = default;
Error emitRegMove(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment = nullptr) override;
ASMJIT_INLINE void reset(BaseEmitter* emitter) noexcept {
_emitter = emitter;
}
Error emitRegSwap(
Error emit_reg_move(
const Operand_& dst_,
const Operand_& src_, TypeId type_id, const char* comment = nullptr) override;
Error emit_reg_swap(
const Reg& a,
const Reg& b, const char* comment = nullptr) override;
Error emitArgMove(
const Reg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr) override;
Error emit_arg_move(
const Reg& dst_, TypeId dst_type_id,
const Operand_& src_, TypeId src_type_id, const char* comment = nullptr) override;
Error emitProlog(const FuncFrame& frame);
Error emitEpilog(const FuncFrame& frame);
Error emit_prolog(const FuncFrame& frame);
Error emit_epilog(const FuncFrame& frame);
};
void initEmitterFuncs(BaseEmitter* emitter);
void init_emitter_funcs(BaseEmitter* emitter);
[[maybe_unused]]
static inline void updateEmitterFuncs(BaseEmitter* emitter) noexcept { DebugUtils::unused(emitter); }
static inline void update_emitter_funcs(BaseEmitter* emitter) noexcept { Support::maybe_unused(emitter); }
//! \}
//! \endcond

@@ -45,25 +45,25 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
#define ASMJIT_INST_1cc(NAME, ID, T0) \
inline Error NAME(const T0& o0) { return _emitter()->_emitI(Inst::kId##ID, o0); } \
\
inline Error NAME(CondCode cc, const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, cc), o0); } \
inline Error NAME(CondCode cc, const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, cc), o0); } \
\
inline Error NAME##_eq(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kEQ), o0); } \
inline Error NAME##_ne(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kNE), o0); } \
inline Error NAME##_cs(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kCS), o0); } \
inline Error NAME##_hs(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kHS), o0); } \
inline Error NAME##_cc(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kCC), o0); } \
inline Error NAME##_lo(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kLO), o0); } \
inline Error NAME##_mi(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kMI), o0); } \
inline Error NAME##_pl(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kPL), o0); } \
inline Error NAME##_vs(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kVS), o0); } \
inline Error NAME##_vc(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kVC), o0); } \
inline Error NAME##_hi(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kHI), o0); } \
inline Error NAME##_ls(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kLS), o0); } \
inline Error NAME##_ge(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kGE), o0); } \
inline Error NAME##_lt(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kLT), o0); } \
inline Error NAME##_gt(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kGT), o0); } \
inline Error NAME##_le(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kLE), o0); } \
inline Error NAME##_al(const T0& o0) { return _emitter()->_emitI(BaseInst::composeARMInstId(Inst::kId##ID, CondCode::kAL), o0); }
inline Error NAME##_eq(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kEQ), o0); } \
inline Error NAME##_ne(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kNE), o0); } \
inline Error NAME##_cs(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kCS), o0); } \
inline Error NAME##_hs(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kHS), o0); } \
inline Error NAME##_cc(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kCC), o0); } \
inline Error NAME##_lo(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kLO), o0); } \
inline Error NAME##_mi(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kMI), o0); } \
inline Error NAME##_pl(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kPL), o0); } \
inline Error NAME##_vs(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kVS), o0); } \
inline Error NAME##_vc(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kVC), o0); } \
inline Error NAME##_hi(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kHI), o0); } \
inline Error NAME##_ls(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kLS), o0); } \
inline Error NAME##_ge(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kGE), o0); } \
inline Error NAME##_lt(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kLT), o0); } \
inline Error NAME##_gt(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kGT), o0); } \
inline Error NAME##_le(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kLE), o0); } \
inline Error NAME##_al(const T0& o0) { return _emitter()->_emitI(BaseInst::compose_arm_inst_id(Inst::kId##ID, CondCode::kAL), o0); }
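
In the AArch64 emitter this macro expands the conditional branch `b` into a per-condition family. A hedged usage sketch against an a64::Assembler `a` (new_label assumed per the snake_case rename):

    Label done = a.new_label();
    a.cmp(a64::x0, 0);
    a.b_eq(done);        // _emitI(BaseInst::compose_arm_inst_id(Inst::kIdB, CondCode::kEQ), done).
    a.mov(a64::x0, 1);
    a.bind(done);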
//! \addtogroup asmjit_a64
//! \{
@@ -89,9 +89,9 @@ struct EmitterExplicitT {
//! \{
//! Returns either a 32-bit or a 64-bit GP register of the given `id`, depending on the emitter's architecture.
inline Gp gpz(uint32_t id) const noexcept { return Gp(_emitter()->_gpSignature, id); }
inline Gp gpz(uint32_t id) const noexcept { return Gp(_emitter()->_gp_signature, id); }
//! Clones the given `reg` to either a 32-bit or a 64-bit GP register, depending on the emitter's architecture.
inline Gp gpz(const Gp& reg) const noexcept { return Gp(_emitter()->_gpSignature, reg.id()); }
inline Gp gpz(const Gp& reg) const noexcept { return Gp(_emitter()->_gp_signature, reg.id()); }
//! \}

@@ -22,43 +22,43 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
// a64::FormatterInternal - Format Instruction
// ===========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatInstruction(
ASMJIT_FAVOR_SIZE Error FormatterInternal::format_instruction(
String& sb,
FormatFlags formatFlags,
FormatFlags format_flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept {
const BaseInst& inst, Span<const Operand_> operands) noexcept {
// Format instruction options and instruction mnemonic.
InstId instId = inst.realId();
if (instId != Inst::kIdNone && instId < Inst::_kIdCount) {
InstStringifyOptions stringifyOptions =
Support::test(formatFlags, FormatFlags::kShowAliases)
InstId inst_id = inst.real_id();
if (inst_id != Inst::kIdNone && inst_id < Inst::_kIdCount) {
InstStringifyOptions stringify_options =
Support::test(format_flags, FormatFlags::kShowAliases)
? InstStringifyOptions::kAliases
: InstStringifyOptions::kNone;
ASMJIT_PROPAGATE(InstInternal::instIdToString(instId, stringifyOptions, sb));
ASMJIT_PROPAGATE(InstInternal::inst_id_to_string(inst_id, stringify_options, sb));
}
else {
ASMJIT_PROPAGATE(sb.appendFormat("[InstId=#%u]", unsigned(instId)));
ASMJIT_PROPAGATE(sb.append_format("[InstId=#%u]", unsigned(inst_id)));
}
CondCode cc = inst.armCondCode();
CondCode cc = inst.arm_cond_code();
if (cc != CondCode::kAL) {
ASMJIT_PROPAGATE(sb.append('.'));
ASMJIT_PROPAGATE(formatCondCode(sb, cc));
ASMJIT_PROPAGATE(format_cond_code(sb, cc));
}
for (uint32_t i = 0; i < opCount; i++) {
for (size_t i = 0u; i < operands.size(); i++) {
const Operand_& op = operands[i];
if (op.isNone()) {
if (op.is_none()) {
break;
}
ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
ASMJIT_PROPAGATE(formatOperand(sb, formatFlags, emitter, arch, op));
ASMJIT_PROPAGATE(format_operand(sb, format_flags, emitter, arch, op));
}
return kErrorOk;
return Error::kOk;
}
ASMJIT_END_SUB_NAMESPACE
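
A hedged call-site sketch of the Span-based signature; the Span<T>(data, size) constructor is assumed from the new non-owning asmjit::Span type, and passing a null emitter is assumed to be acceptable for plain formatting:

    Operand_ ops[] = { a64::x0, a64::x1, imm(1) };
    String sb;
    Error err = FormatterInternal::format_instruction(
      sb, FormatFlags::kNone, nullptr, Arch::kAArch64,
      BaseInst(Inst::kIdAdd), Span<const Operand_>(ops, 3));
    // On success `sb` should contain something like "add x0, x1, 1".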

@@ -10,6 +10,7 @@
#ifndef ASMJIT_NO_LOGGING
#include "../core/formatter.h"
#include "../core/span.h"
#include "../core/string.h"
#include "../arm/armformatter_p.h"
#include "../arm/a64globals.h"
@@ -24,12 +25,12 @@ namespace FormatterInternal {
using namespace arm::FormatterInternal;
Error ASMJIT_CDECL formatInstruction(
Error ASMJIT_CDECL format_instruction(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
const BaseInst& inst, Span<const Operand_> operands) noexcept;
} // {FormatterInternal}

@@ -13,31 +13,31 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
namespace FuncInternal {
static inline bool shouldTreatAsCDecl(CallConvId ccId) noexcept {
return ccId == CallConvId::kCDecl ||
ccId == CallConvId::kStdCall ||
ccId == CallConvId::kFastCall ||
ccId == CallConvId::kVectorCall ||
ccId == CallConvId::kThisCall ||
ccId == CallConvId::kRegParm1 ||
ccId == CallConvId::kRegParm2 ||
ccId == CallConvId::kRegParm3;
static inline bool should_treat_as_cdecl(CallConvId call_conv_id) noexcept {
return call_conv_id == CallConvId::kCDecl ||
call_conv_id == CallConvId::kStdCall ||
call_conv_id == CallConvId::kFastCall ||
call_conv_id == CallConvId::kVectorCall ||
call_conv_id == CallConvId::kThisCall ||
call_conv_id == CallConvId::kRegParm1 ||
call_conv_id == CallConvId::kRegParm2 ||
call_conv_id == CallConvId::kRegParm3;
}
static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
if (typeId == TypeId::kFloat32) {
static RegType reg_type_from_fp_or_vec_type_id(TypeId type_id) noexcept {
if (type_id == TypeId::kFloat32) {
return RegType::kVec32;
}
else if (typeId == TypeId::kFloat64) {
else if (type_id == TypeId::kFloat64) {
return RegType::kVec64;
}
else if (TypeUtils::isVec32(typeId)) {
else if (TypeUtils::is_vec32(type_id)) {
return RegType::kVec32;
}
else if (TypeUtils::isVec64(typeId)) {
else if (TypeUtils::is_vec64(type_id)) {
return RegType::kVec64;
}
else if (TypeUtils::isVec128(typeId)) {
else if (TypeUtils::is_vec128(type_id)) {
return RegType::kVec128;
}
else {
@@ -45,88 +45,86 @@ static RegType regTypeFromFpOrVecTypeId(TypeId typeId) noexcept {
}
}
ASMJIT_FAVOR_SIZE Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept {
cc.setArch(environment.arch());
cc.setStrategy(environment.isDarwin() ? CallConvStrategy::kAArch64Apple : CallConvStrategy::kDefault);
ASMJIT_FAVOR_SIZE Error init_call_conv(CallConv& cc, CallConvId call_conv_id, const Environment& environment) noexcept {
cc.set_arch(environment.arch());
cc.set_strategy(environment.is_darwin_abi() ? CallConvStrategy::kAArch64Apple : CallConvStrategy::kDefault);
cc.setSaveRestoreRegSize(RegGroup::kGp, 8);
cc.setSaveRestoreRegSize(RegGroup::kVec, 8);
cc.setSaveRestoreAlignment(RegGroup::kGp, 16);
cc.setSaveRestoreAlignment(RegGroup::kVec, 16);
cc.setSaveRestoreAlignment(RegGroup::kMask, 1);
cc.setSaveRestoreAlignment(RegGroup::kExtraVirt3, 1);
cc.setPassedOrder(RegGroup::kGp, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setPassedOrder(RegGroup::kVec, 0, 1, 2, 3, 4, 5, 6, 7);
cc.setNaturalStackAlignment(16);
cc.set_save_restore_reg_size(RegGroup::kGp, 8);
cc.set_save_restore_reg_size(RegGroup::kVec, 8);
cc.set_save_restore_alignment(RegGroup::kGp, 16);
cc.set_save_restore_alignment(RegGroup::kVec, 16);
cc.set_save_restore_alignment(RegGroup::kMask, 8);
cc.set_save_restore_alignment(RegGroup::kExtra, 1);
cc.set_passed_order(RegGroup::kGp, 0, 1, 2, 3, 4, 5, 6, 7);
cc.set_passed_order(RegGroup::kVec, 0, 1, 2, 3, 4, 5, 6, 7);
cc.set_natural_stack_alignment(16);
if (shouldTreatAsCDecl(ccId)) {
if (should_treat_as_cdecl(call_conv_id)) {
// ARM doesn't have as many calling conventions as the X86 world, so treat most of them as __cdecl.
cc.setId(CallConvId::kCDecl);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(Gp::kIdOs, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(8, 9, 10, 11, 12, 13, 14, 15));
cc.set_id(CallConvId::kCDecl);
cc.set_preserved_regs(RegGroup::kGp, Support::bit_mask<RegMask>(Gp::kIdOs, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.set_preserved_regs(RegGroup::kVec, Support::bit_mask<RegMask>(8, 9, 10, 11, 12, 13, 14, 15));
}
else {
cc.setId(ccId);
cc.setSaveRestoreRegSize(RegGroup::kVec, 16);
cc.setPreservedRegs(RegGroup::kGp, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.setPreservedRegs(RegGroup::kVec, Support::bitMask(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
cc.set_id(call_conv_id);
cc.set_save_restore_reg_size(RegGroup::kVec, 16);
cc.set_preserved_regs(RegGroup::kGp, Support::bit_mask<RegMask>(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30));
cc.set_preserved_regs(RegGroup::kVec, Support::bit_mask<RegMask>(4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
return kErrorOk;
return Error::kOk;
}
ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& signature) noexcept {
DebugUtils::unused(signature);
ASMJIT_FAVOR_SIZE Error init_func_detail(FuncDetail& func, const FuncSignature& signature) noexcept {
Support::maybe_unused(signature);
const CallConv& cc = func.callConv();
uint32_t stackOffset = 0;
uint32_t i;
uint32_t argCount = func.argCount();
const CallConv& cc = func.call_conv();
uint32_t arg_count = func.arg_count();
uint32_t stack_offset = 0;
// Minimum stack size of a single argument passed via stack. The standard AArch64 calling convention
// specifies 8 bytes, so each function argument would occupy at least 8 bytes even if it needs less.
// However, Apple has decided not to follow this rule, so a function argument can occupy less; for
// example, two consecutive 32-bit arguments occupy 8 bytes total instead of the 16 specified
// by ARM.
uint32_t minStackArgSize = cc.strategy() == CallConvStrategy::kAArch64Apple ? 4u : 8u;
uint32_t min_stack_arg_size = cc.strategy() == CallConvStrategy::kAArch64Apple ? 4u : 8u;
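// Illustration (hedged): with ten 32-bit integer arguments, x0..x7 take the
// first eight and the remaining two spill to the stack:
//   - kDefault strategy:      arg[8] at [sp+0], arg[9] at [sp+8] (8-byte slots)
//   - kAArch64Apple strategy: arg[8] at [sp+0], arg[9] at [sp+4] (packed)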
if (func.hasRet()) {
for (uint32_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++) {
TypeId typeId = func._rets[valueIndex].typeId();
if (func.has_ret()) {
for (uint32_t value_index = 0; value_index < Globals::kMaxValuePack; value_index++) {
TypeId type_id = func._rets[value_index].type_id();
// Terminate at the first void type (end of the pack).
if (typeId == TypeId::kVoid)
if (type_id == TypeId::kVoid)
break;
switch (typeId) {
switch (type_id) {
case TypeId::kInt8:
case TypeId::kInt16:
case TypeId::kInt32: {
func._rets[valueIndex].initReg(RegType::kGp32, valueIndex, TypeId::kInt32);
func._rets[value_index].init_reg(RegType::kGp32, value_index, TypeId::kInt32);
break;
}
case TypeId::kUInt8:
case TypeId::kUInt16:
case TypeId::kUInt32: {
func._rets[valueIndex].initReg(RegType::kGp32, valueIndex, TypeId::kUInt32);
func._rets[value_index].init_reg(RegType::kGp32, value_index, TypeId::kUInt32);
break;
}
case TypeId::kInt64:
case TypeId::kUInt64: {
func._rets[valueIndex].initReg(RegType::kGp64, valueIndex, typeId);
func._rets[value_index].init_reg(RegType::kGp64, value_index, type_id);
break;
}
default: {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone) {
return DebugUtils::errored(kErrorInvalidRegType);
RegType reg_type = reg_type_from_fp_or_vec_type_id(type_id);
if (reg_type == RegType::kNone) {
return make_error(Error::kInvalidRegType);
}
func._rets[valueIndex].initReg(regType, valueIndex, typeId);
func._rets[value_index].init_reg(reg_type, value_index, type_id);
break;
}
}
@@ -136,62 +134,62 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
switch (cc.strategy()) {
case CallConvStrategy::kDefault:
case CallConvStrategy::kAArch64Apple: {
uint32_t gpzPos = 0;
uint32_t vecPos = 0;
uint32_t gpz_pos = 0;
uint32_t vec_pos = 0;
for (i = 0; i < argCount; i++) {
for (uint32_t i = 0; i < arg_count; i++) {
FuncValue& arg = func._args[i][0];
TypeId typeId = arg.typeId();
TypeId type_id = arg.type_id();
if (TypeUtils::isInt(typeId)) {
uint32_t regId = Reg::kIdBad;
if (TypeUtils::is_int(type_id)) {
uint32_t reg_id = Reg::kIdBad;
if (gpzPos < CallConv::kMaxRegArgsPerGroup) {
regId = cc._passedOrder[RegGroup::kGp].id[gpzPos];
if (gpz_pos < CallConv::kMaxRegArgsPerGroup) {
reg_id = cc._passed_order[RegGroup::kGp].id[gpz_pos];
}
if (regId != Reg::kIdBad) {
RegType regType = typeId <= TypeId::kUInt32 ? RegType::kGp32 : RegType::kGp64;
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kGp, Support::bitMask(regId));
gpzPos++;
if (reg_id != Reg::kIdBad) {
RegType reg_type = type_id <= TypeId::kUInt32 ? RegType::kGp32 : RegType::kGp64;
arg.assign_reg_data(reg_type, reg_id);
func.add_used_regs(RegGroup::kGp, Support::bit_mask<RegMask>(reg_id));
gpz_pos++;
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
uint32_t size = Support::max<uint32_t>(TypeUtils::size_of(type_id), min_stack_arg_size);
if (size >= 8) {
stackOffset = Support::alignUp(stackOffset, 8);
stack_offset = Support::align_up(stack_offset, 8);
}
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
arg.assign_stack_offset(int32_t(stack_offset));
stack_offset += size;
}
continue;
}
if (TypeUtils::isFloat(typeId) || TypeUtils::isVec(typeId)) {
uint32_t regId = Reg::kIdBad;
if (TypeUtils::is_float(type_id) || TypeUtils::is_vec(type_id)) {
uint32_t reg_id = Reg::kIdBad;
if (vecPos < CallConv::kMaxRegArgsPerGroup) {
regId = cc._passedOrder[RegGroup::kVec].id[vecPos];
if (vec_pos < CallConv::kMaxRegArgsPerGroup) {
reg_id = cc._passed_order[RegGroup::kVec].id[vec_pos];
}
if (regId != Reg::kIdBad) {
RegType regType = regTypeFromFpOrVecTypeId(typeId);
if (regType == RegType::kNone) {
return DebugUtils::errored(kErrorInvalidRegType);
if (reg_id != Reg::kIdBad) {
RegType reg_type = reg_type_from_fp_or_vec_type_id(type_id);
if (reg_type == RegType::kNone) {
return make_error(Error::kInvalidRegType);
}
arg.initTypeId(typeId);
arg.assignRegData(regType, regId);
func.addUsedRegs(RegGroup::kVec, Support::bitMask(regId));
vecPos++;
arg.init_type_id(type_id);
arg.assign_reg_data(reg_type, reg_id);
func.add_used_regs(RegGroup::kVec, Support::bit_mask<RegMask>(reg_id));
vec_pos++;
}
else {
uint32_t size = Support::max<uint32_t>(TypeUtils::sizeOf(typeId), minStackArgSize);
uint32_t size = Support::max<uint32_t>(TypeUtils::size_of(type_id), min_stack_arg_size);
if (size >= 8) {
stackOffset = Support::alignUp(stackOffset, 8);
stack_offset = Support::align_up(stack_offset, 8);
}
arg.assignStackOffset(int32_t(stackOffset));
stackOffset += size;
arg.assign_stack_offset(int32_t(stack_offset));
stack_offset += size;
}
continue;
}
@@ -200,11 +198,11 @@ ASMJIT_FAVOR_SIZE Error initFuncDetail(FuncDetail& func, const FuncSignature& si
}
default:
return DebugUtils::errored(kErrorInvalidState);
return make_error(Error::kInvalidState);
}
func._argStackSize = Support::alignUp(stackOffset, 8u);
return kErrorOk;
func._arg_stack_size = Support::align_up(stack_offset, 8u);
return Error::kOk;
}
} // {FuncInternal}

@@ -18,10 +18,10 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
namespace FuncInternal {
//! Initialize `CallConv` structure (AArch64 specific).
Error initCallConv(CallConv& cc, CallConvId ccId, const Environment& environment) noexcept;
Error init_call_conv(CallConv& cc, CallConvId call_conv_id, const Environment& environment) noexcept;
//! Initialize `FuncDetail` (AArch64 specific).
Error initFuncDetail(FuncDetail& func, const FuncSignature& signature) noexcept;
Error init_func_detail(FuncDetail& func, const FuncSignature& signature) noexcept;
} // {FuncInternal}

@@ -805,16 +805,16 @@ namespace Inst {
// ${InstId:End}
};
//! Tests whether the `instId` is defined (counts also Inst::kIdNone, which must be zero).
static ASMJIT_INLINE_NODEBUG bool isDefinedId(InstId instId) noexcept { return (instId & uint32_t(InstIdParts::kRealId)) < _kIdCount; }
};
//! Tests whether the `inst_id` is defined (this also counts Inst::kIdNone, which must be zero).
static ASMJIT_INLINE_NODEBUG bool is_defined_id(InstId inst_id) noexcept { return (inst_id & uint32_t(InstIdParts::kRealId)) < _kIdCount; }
}
namespace Predicate {
//! Address translate options (AT).
namespace AT {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) noexcept {
return (op1 << 11) | (crn << 7) | (crm << 3) | (op2 << 0);
}
enum Value : uint32_t {
@@ -882,8 +882,8 @@ namespace DB {
//! Data cache maintenance options.
namespace DC {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) noexcept {
return (op1 << 11) | (crn << 7) | (crm << 3) | (op2 << 0);
}
//! Data cache maintenance immediate values.
@@ -921,8 +921,8 @@ namespace DC {
//! Instruction cache maintenance options.
namespace IC {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) noexcept {
return (op1 << 11) | (crn << 7) | (crm << 3) | (op2 << 0);
}
//! Instruction cache maintenance immediate values.
@@ -975,8 +975,8 @@ namespace PSB {
}
namespace TLBI {
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) noexcept {
return (op1 << 11) | (crn << 7) | (crm << 3) | (op2 << 0);
}
enum Value : uint32_t {
@@ -1089,7 +1089,7 @@ namespace PState {
kSSBS = encode(0b011, 0b001),
kTCO = encode(0b011, 0b100)
};
};
}
//! System register identifiers and utilities (MSR/MRS).
namespace SysReg {
@@ -1097,19 +1097,19 @@ namespace SysReg {
struct Fields {
uint8_t op0;
uint8_t op1;
uint8_t cRn;
uint8_t cRm;
uint8_t crn;
uint8_t crm;
uint8_t op2;
};
//! Encodes a system register from `op0`, `op1`, `cRn`, `cRm`, and `op2` fields.
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1, uint32_t cRn, uint32_t cRm, uint32_t op2) noexcept {
return (op0 << 14) | (op1 << 11) | (cRn << 7) | (cRm << 3) | (op2 << 0);
//! Encodes a system register from `op0`, `op1`, `crn`, `crm`, and `op2` fields.
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) noexcept {
return (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | (op2 << 0);
}
//! Encodes a system register from `fields`.
static ASMJIT_INLINE_CONSTEXPR uint32_t encode(const Fields& fields) noexcept {
return encode(fields.op0, fields.op1, fields.cRn, fields.cRm, fields.op2);
return encode(fields.op0, fields.op1, fields.crn, fields.crm, fields.op2);
}
//! Decodes a system register to \ref Fields.
@@ -1906,7 +1906,7 @@ namespace SysReg {
kZCR_EL2 = encode(0b11, 0b100, 0b0001, 0b0010, 0b000), // RW
kZCR_EL3 = encode(0b11, 0b110, 0b0001, 0b0010, 0b000) // RW
};
};
}
} // {Predicate}
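
As a worked example of the field packing above: NZCV is S3_3_C4_C2_0 per the ARM ARM, i.e. op0=0b11, op1=0b011, CRn=0b0100, CRm=0b0010, op2=0b000, so:

    SysReg::encode(3, 3, 4, 2, 0)
      == (3 << 14) | (3 << 11) | (4 << 7) | (2 << 3) | 0
      == 0xC000    | 0x1800    | 0x200    | 0x10
      == 0xDA10    // This should match SysReg::kNZCV.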

@@ -21,16 +21,16 @@ namespace InstInternal {
// ========================
#ifndef ASMJIT_NO_TEXT
Error instIdToString(InstId instId, InstStringifyOptions options, String& output) noexcept {
uint32_t realId = instId & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
return DebugUtils::errored(kErrorInvalidInstruction);
Error inst_id_to_string(InstId inst_id, InstStringifyOptions options, String& output) noexcept {
uint32_t real_id = inst_id & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::is_defined_id(real_id))) {
return make_error(Error::kInvalidInstruction);
}
return InstNameUtils::decode(InstDB::_instNameIndexTable[realId], options, InstDB::_instNameStringTable, output);
return InstNameUtils::decode(InstDB::_inst_name_index_table[real_id], options, InstDB::_inst_name_string_table, output);
}
InstId stringToInstId(const char* s, size_t len) noexcept {
InstId string_to_inst_id(const char* s, size_t len) noexcept {
if (ASMJIT_UNLIKELY(!s)) {
return BaseInst::kIdNone;
}
@@ -39,11 +39,11 @@ InstId stringToInstId(const char* s, size_t len) noexcept {
len = strlen(s);
}
if (len == 0u || len > InstDB::instNameIndex.maxNameLength) {
if (len == 0u || len > InstDB::_inst_name_index.max_name_length) {
return BaseInst::kIdNone;
}
return InstNameUtils::findInstruction(s, len, InstDB::_instNameIndexTable, InstDB::_instNameStringTable, InstDB::instNameIndex);
return InstNameUtils::find_instruction(s, len, InstDB::_inst_name_index_table, InstDB::_inst_name_string_table, InstDB::_inst_name_index);
}
#endif // !ASMJIT_NO_TEXT
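Taken together the two functions round-trip between mnemonics and instruction ids. A minimal sketch (error handling elided):

InstId id = string_to_inst_id("cas", 3);
// id != BaseInst::kIdNone for a known mnemonic, and feeding it back through
// inst_id_to_string() produces "cas" again.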
@@ -51,10 +51,10 @@ InstId stringToInstId(const char* s, size_t len) noexcept {
// ============================
#ifndef ASMJIT_NO_VALIDATION
ASMJIT_FAVOR_SIZE Error validate(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
ASMJIT_FAVOR_SIZE Error validate(const BaseInst& inst, const Operand_* operands, size_t op_count, ValidationFlags validation_flags) noexcept {
// TODO:
DebugUtils::unused(inst, operands, opCount, validationFlags);
return kErrorOk;
Support::maybe_unused(inst, operands, op_count, validation_flags);
return Error::kOk;
}
#endif // !ASMJIT_NO_VALIDATION
@@ -66,7 +66,7 @@ struct InstRWInfoData {
uint8_t rwx[Globals::kMaxOpCount];
};
static const InstRWInfoData instRWInfoData[] = {
static const InstRWInfoData inst_rw_info_table[] = {
#define R uint8_t(OpRWFlags::kRead)
#define W uint8_t(OpRWFlags::kWrite)
#define X uint8_t(OpRWFlags::kRW)
@@ -95,133 +95,133 @@ static const InstRWInfoData instRWInfoData[] = {
#undef X
};
static const uint8_t elementTypeSize[8] = { 0, 1, 2, 4, 8, 4, 4, 0 };
static const uint8_t element_type_size_table[8] = { 0, 1, 2, 4, 8, 4, 4, 0 };
Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
Error query_rw_info(const BaseInst& inst, const Operand_* operands, size_t op_count, InstRWInfo* out) noexcept {
// Get the instruction data.
uint32_t realId = inst.id() & uint32_t(InstIdParts::kRealId);
uint32_t real_id = inst.inst_id() & uint32_t(InstIdParts::kRealId);
if (ASMJIT_UNLIKELY(!Inst::isDefinedId(realId))) {
return DebugUtils::errored(kErrorInvalidInstruction);
if (ASMJIT_UNLIKELY(!Inst::is_defined_id(real_id))) {
return make_error(Error::kInvalidInstruction);
}
out->_instFlags = InstRWFlags::kNone;
out->_opCount = uint8_t(opCount);
out->_rmFeature = 0;
out->_extraReg.reset();
out->_readFlags = CpuRWFlags::kNone; // TODO: [ARM] Read PSTATUS.
out->_writeFlags = CpuRWFlags::kNone; // TODO: [ARM] Write PSTATUS
out->_inst_flags = InstRWFlags::kNone;
out->_op_count = uint8_t(op_count);
out->_rm_feature = 0;
out->_extra_reg.reset();
out->_read_flags = CpuRWFlags::kNone; // TODO: [ARM] Read PSTATUS.
out->_write_flags = CpuRWFlags::kNone; // TODO: [ARM] Write PSTATUS
const InstDB::InstInfo& instInfo = InstDB::_instInfoTable[realId];
const InstRWInfoData& rwInfo = instRWInfoData[instInfo.rwInfoIndex()];
const InstDB::InstInfo& inst_info = InstDB::_inst_info_table[real_id];
const InstRWInfoData& rw_info = inst_rw_info_table[inst_info.rw_info_index()];
if (instInfo.hasFlag(InstDB::kInstFlagConsecutive) && opCount > 2) {
for (uint32_t i = 0; i < opCount; i++) {
if (inst_info.has_flag(InstDB::kInstFlagConsecutive) && op_count > 2) {
for (uint32_t i = 0; i < op_count; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
const Operand_& src_op = operands[i];
if (!srcOp.isRegOrMem()) {
if (!src_op.is_reg_or_mem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = i < opCount - 1 ? (OpRWFlags)rwInfo.rwx[0] : (OpRWFlags)rwInfo.rwx[1];
OpRWFlags rw_flags = i < op_count - 1 ? (OpRWFlags)rw_info.rwx[0] : (OpRWFlags)rw_info.rwx[1];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = Reg::kIdBad;
op._rmSize = 0;
op._resetReserved();
op._op_flags = rw_flags & ~(OpRWFlags::kZExt);
op._phys_id = Reg::kIdBad;
op._rm_size = 0;
op._reset_reserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t r_byte_mask = op.is_read() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t w_byte_mask = op.is_write() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
op._read_byte_mask = r_byte_mask;
op._write_byte_mask = w_byte_mask;
op._extend_byte_mask = 0;
op._consecutive_lead_count = 0;
if (srcOp.isReg()) {
if (src_op.is_reg()) {
if (i == 0) {
op._consecutiveLeadCount = uint8_t(opCount - 1);
op._consecutive_lead_count = uint8_t(op_count - 1);
}
else {
op.addOpFlags(OpRWFlags::kConsecutive);
op.add_op_flags(OpRWFlags::kConsecutive);
}
}
else {
const Mem& memOp = srcOp.as<Mem>();
const Mem& mem_op = src_op.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
if ((memOp.hasIndex() || memOp.hasOffset()) && memOp.isPreOrPost()) {
op.addOpFlags(OpRWFlags::kMemBaseWrite);
if (mem_op.has_base()) {
op.add_op_flags(OpRWFlags::kMemBaseRead);
if ((mem_op.has_index() || mem_op.has_offset()) && mem_op.is_pre_or_post()) {
op.add_op_flags(OpRWFlags::kMemBaseWrite);
}
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
if (mem_op.has_index()) {
op.add_op_flags(OpRWFlags::kMemIndexRead);
}
}
}
}
else {
for (uint32_t i = 0; i < opCount; i++) {
for (uint32_t i = 0; i < op_count; i++) {
OpRWInfo& op = out->_operands[i];
const Operand_& srcOp = operands[i];
const Operand_& src_op = operands[i];
if (!srcOp.isRegOrMem()) {
if (!src_op.is_reg_or_mem()) {
op.reset();
continue;
}
OpRWFlags rwFlags = (OpRWFlags)rwInfo.rwx[i];
OpRWFlags rw_flags = (OpRWFlags)rw_info.rwx[i];
op._opFlags = rwFlags & ~(OpRWFlags::kZExt);
op._physId = Reg::kIdBad;
op._rmSize = 0;
op._resetReserved();
op._op_flags = rw_flags & ~(OpRWFlags::kZExt);
op._phys_id = Reg::kIdBad;
op._rm_size = 0;
op._reset_reserved();
uint64_t rByteMask = op.isRead() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t wByteMask = op.isWrite() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t r_byte_mask = op.is_read() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
uint64_t w_byte_mask = op.is_write() ? 0xFFFFFFFFFFFFFFFFu : 0x0000000000000000u;
op._readByteMask = rByteMask;
op._writeByteMask = wByteMask;
op._extendByteMask = 0;
op._consecutiveLeadCount = 0;
op._read_byte_mask = r_byte_mask;
op._write_byte_mask = w_byte_mask;
op._extend_byte_mask = 0;
op._consecutive_lead_count = 0;
if (srcOp.isReg()) {
if (srcOp.as<Vec>().hasElementIndex()) {
if (src_op.is_reg()) {
if (src_op.as<Vec>().has_element_index()) {
// Only part of the vector is accessed if element index [] is used.
VecElementType elementType = srcOp.as<Vec>().elementType();
uint32_t elementIndex = srcOp.as<Vec>().elementIndex();
VecElementType element_type = src_op.as<Vec>().element_type();
uint32_t element_index = src_op.as<Vec>().element_index();
uint32_t elementSize = elementTypeSize[size_t(elementType)];
uint64_t accessMask = uint64_t(Support::lsbMask<uint32_t>(elementSize)) << (elementIndex * elementSize);
uint32_t element_size = element_type_size_table[size_t(element_type)];
uint64_t access_mask = uint64_t(Support::lsb_mask<uint32_t>(element_size)) << (element_index * element_size);
op._readByteMask &= accessMask;
op._writeByteMask &= accessMask;
op._read_byte_mask &= access_mask;
op._write_byte_mask &= access_mask;
}
// TODO: [ARM] RW info is not finished.
}
else {
const Mem& memOp = srcOp.as<Mem>();
const Mem& mem_op = src_op.as<Mem>();
if (memOp.hasBase()) {
op.addOpFlags(OpRWFlags::kMemBaseRead);
if ((memOp.hasIndex() || memOp.hasOffset()) && memOp.isPreOrPost()) {
op.addOpFlags(OpRWFlags::kMemBaseWrite);
if (mem_op.has_base()) {
op.add_op_flags(OpRWFlags::kMemBaseRead);
if ((mem_op.has_index() || mem_op.has_offset()) && mem_op.is_pre_or_post()) {
op.add_op_flags(OpRWFlags::kMemBaseWrite);
}
}
if (memOp.hasIndex()) {
op.addOpFlags(OpRWFlags::kMemIndexRead);
if (mem_op.has_index()) {
op.add_op_flags(OpRWFlags::kMemIndexRead);
}
}
}
}
return kErrorOk;
return Error::kOk;
}
#endif // !ASMJIT_NO_INTROSPECTION
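A sketch of how the introspection output is consumed; the operand setup below is hypothetical and the fields referenced are the ones populated above:

InstRWInfo rw;
Operand ops[] = { x0, x1 };                        // e.g. 'abs x0, x1'
if (query_rw_info(BaseInst(Inst::kIdAbs), ops, 2, &rw) == Error::kOk) {
  // rw._operands[0] and rw._operands[1] carry the R/W flags taken from
  // inst_rw_info_table, and the byte masks narrow the access further -
  // e.g. an element operand such as v0.s(1) reads/writes only 4 bytes.
}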
@@ -229,10 +229,10 @@ Error queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount
// =================================
#ifndef ASMJIT_NO_INTROSPECTION
Error queryFeatures(const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
Error query_features(const BaseInst& inst, const Operand_* operands, size_t op_count, CpuFeatures* out) noexcept {
// TODO: [ARM] QueryFeatures not implemented yet.
DebugUtils::unused(inst, operands, opCount, out);
return kErrorOk;
Support::maybe_unused(inst, operands, op_count, out);
return Error::kOk;
}
#endif // !ASMJIT_NO_INTROSPECTION

View File

@@ -18,17 +18,17 @@ ASMJIT_BEGIN_SUB_NAMESPACE(a64)
namespace InstInternal {
#ifndef ASMJIT_NO_TEXT
Error ASMJIT_CDECL instIdToString(InstId instId, InstStringifyOptions options, String& output) noexcept;
InstId ASMJIT_CDECL stringToInstId(const char* s, size_t len) noexcept;
Error ASMJIT_CDECL inst_id_to_string(InstId inst_id, InstStringifyOptions options, String& output) noexcept;
InstId ASMJIT_CDECL string_to_inst_id(const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
Error ASMJIT_CDECL validate(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
Error ASMJIT_CDECL validate(const BaseInst& inst, const Operand_* operands, size_t op_count, ValidationFlags validation_flags) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
Error ASMJIT_CDECL queryRWInfo(const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
Error ASMJIT_CDECL queryFeatures(const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
Error ASMJIT_CDECL query_rw_info(const BaseInst& inst, const Operand_* operands, size_t op_count, InstRWInfo* out) noexcept;
Error ASMJIT_CDECL query_features(const BaseInst& inst, const Operand_* operands, size_t op_count, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstInternal}

View File

@@ -19,11 +19,11 @@ namespace InstDB {
// ===========================
// Defines an ARM/AArch64 instruction.
#define INST(id, opcodeEncoding, opcodeData, rwInfoIndex, flags, opcodeDataIndex) { \
uint32_t(kEncoding##opcodeEncoding), \
uint32_t(opcodeDataIndex), \
#define INST(id, opcode_encoding, opcode_data, rw_info_index, flags, opcode_data_index) { \
uint32_t(kEncoding##opcode_encoding), \
uint32_t(opcode_data_index), \
0, \
uint16_t(rwInfoIndex), \
uint16_t(rw_info_index), \
uint16_t(flags) \
}
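Each INST row therefore stores only indices and flags; the opcode tuple in the second column is dropped by the macro and lives in separate per-encoding data tables selected by opcode_data_index. Conceptually, using the Cbnz row quoted below:

// INST(Cbnz, BaseBranchCmp, (0b00110101...), kRWI_R, 0, 0) expands to:
// { uint32_t(kEncodingBaseBranchCmp), uint32_t(0), 0, uint16_t(kRWI_R), uint16_t(0) }
// matching InstInfo's field order: _encoding, _encoding_data_index,
// _reserved, _rw_info_index, _flags.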
@@ -50,7 +50,7 @@ SYSL
IRG: Insert Random Tag.
INST_(Irg , BaseRRR , (0b1001101011000000000100, kX , kSP, kX , kSP, kX , kZR, true) , kRWI_W , 0 , 0 , 1 ), // #1
*/
const InstInfo _instInfoTable[] = {
const InstInfo _inst_info_table[] = {
// +------------------+---------------------+--------------------------------------------------------------------------------------+-----------+---------------------------+----+
// | Instruction Id | Encoding | Opcode Data | RW Info | Instruction Flags |DatX|
// +------------------+---------------------+--------------------------------------------------------------------------------------+-----------+---------------------------+----+
@@ -109,10 +109,10 @@ const InstInfo _instInfoTable[] = {
INST(Casl , BaseAtomicOp , (0b1000100010100000111111, kWX, 30, 0) , kRWI_XRX , 0 , 9 ), // #51
INST(Caslb , BaseAtomicOp , (0b0000100010100000111111, kW , 0 , 0) , kRWI_XRX , 0 , 10 ), // #52
INST(Caslh , BaseAtomicOp , (0b0100100010100000111111, kW , 0 , 0) , kRWI_XRX , 0 , 11 ), // #53
INST(Casp , BaseAtomicCasp , (0b0000100000100000011111, kWX, 30) , kRWI_XXRRX, 0 , 0 ), // #54
INST(Caspa , BaseAtomicCasp , (0b0000100001100000011111, kWX, 30) , kRWI_XXRRX, 0 , 1 ), // #55
INST(Caspal , BaseAtomicCasp , (0b0000100001100000111111, kWX, 30) , kRWI_XXRRX, 0 , 2 ), // #56
INST(Caspl , BaseAtomicCasp , (0b0000100000100000111111, kWX, 30) , kRWI_XXRRX, 0 , 3 ), // #57
INST(Casp , BaseAtomicCasp , (0b0000100000100000011111, kWX, 30) , kRWI_XXRRX, F(Consecutive) , 0 ), // #54
INST(Caspa , BaseAtomicCasp , (0b0000100001100000011111, kWX, 30) , kRWI_XXRRX, F(Consecutive) , 1 ), // #55
INST(Caspal , BaseAtomicCasp , (0b0000100001100000111111, kWX, 30) , kRWI_XXRRX, F(Consecutive) , 2 ), // #56
INST(Caspl , BaseAtomicCasp , (0b0000100000100000111111, kWX, 30) , kRWI_XXRRX, F(Consecutive) , 3 ), // #57
INST(Cbnz , BaseBranchCmp , (0b00110101000000000000000000000000) , kRWI_R , 0 , 0 ), // #58
INST(Cbz , BaseBranchCmp , (0b00110100000000000000000000000000) , kRWI_R , 0 , 1 ), // #59
INST(Ccmn , BaseCCmp , (0b00111010010000000000000000000000) , kRWI_R , 0 , 0 ), // #60
@@ -1876,7 +1876,7 @@ const InstDB::CommonInfo InstDB::commonData[] = {
#ifndef ASMJIT_NO_TEXT
// ${NameData:Begin}
// ------------------- Automatically generated, do not edit -------------------
const InstNameIndex InstDB::instNameIndex = {{
const InstNameIndex InstDB::_inst_name_index = {{
{ Inst::kIdAbs , Inst::kIdAnd_v + 1 },
{ Inst::kIdB , Inst::kIdBsl_v + 1 },
{ Inst::kIdCas , Inst::kIdCnt_v + 1 },
@@ -1905,7 +1905,7 @@ const InstNameIndex InstDB::instNameIndex = {{
{ Inst::kIdZip1_v , Inst::kIdZip2_v + 1 }
}, uint16_t(9)};
const char InstDB::_instNameStringTable[] =
const char InstDB::_inst_name_string_table[] =
"\x61\x75\x74\x69\x61\x31\x37\x31\x36\x61\x75\x74\x69\x62\x6C\x64\x73\x6D\x61\x78\x61\x6C\x68\x6C\x64\x73\x6D\x69\x6E"
"\x61\x6C\x6C\x64\x75\x6D\x61\x78\x61\x6C\x6C\x64\x75\x6D\x69\x6E\x61\x6C\x73\x68\x61\x32\x35\x36\x73\x75\x30\x73\x68"
"\x61\x35\x31\x32\x73\x75\x31\x73\x6D\x33\x70\x61\x72\x74\x77\x73\x71\x72\x73\x68\x72\x75\x6E\x6C\x64\x61\x64\x64\x61"
@@ -1920,7 +1920,7 @@ const char InstDB::_instNameStringTable[] =
"\x65\x76\x38";
const uint32_t InstDB::_instNameIndexTable[] = {
const uint32_t InstDB::_inst_name_index_table[] = {
0x80000000, // Small ''.
0x80004C41, // Small 'abs'.
0x80000C81, // Small 'adc'.
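Entries with the top bit set inline the whole mnemonic: each character takes 5 bits ('a' encodes as 1), up to six characters per uint32_t, so short names need no string-table lookup. The quoted rows check out:

static_assert((0x4C41u & 0x1Fu) == 1u);           // 'a'
static_assert(((0x4C41u >> 5) & 0x1Fu) == 2u);    // 'b'
static_assert(((0x4C41u >> 10) & 0x1Fu) == 19u);  // 's' -> "abs"
// Longer names presumably store an offset into _inst_name_string_table instead.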

View File

@@ -38,34 +38,34 @@ struct InstInfo {
//! Instruction encoding type.
uint32_t _encoding : 8;
//! Index to data specific to each encoding type.
uint32_t _encodingDataIndex : 8;
uint32_t _encoding_data_index : 8;
uint32_t _reserved : 16;
uint16_t _rwInfoIndex;
uint16_t _rw_info_index;
uint16_t _flags;
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rwInfoIndex() const noexcept { return _rwInfoIndex; }
ASMJIT_INLINE_NODEBUG uint32_t rw_info_index() const noexcept { return _rw_info_index; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t flags() const noexcept { return _flags; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(uint32_t flag) const { return (_flags & flag) != 0; }
ASMJIT_INLINE_NODEBUG bool has_flag(uint32_t flag) const { return (_flags & flag) != 0; }
//! \}
};
ASMJIT_VARAPI const InstInfo _instInfoTable[];
ASMJIT_VARAPI const InstInfo _inst_info_table[];
[[nodiscard]]
static inline const InstInfo& infoById(InstId instId) noexcept {
instId &= uint32_t(InstIdParts::kRealId);
ASMJIT_ASSERT(Inst::isDefinedId(instId));
return _instInfoTable[instId];
static ASMJIT_INLINE const InstInfo& inst_info_by_id(InstId inst_id) noexcept {
inst_id &= uint32_t(InstIdParts::kRealId);
ASMJIT_ASSERT(Inst::is_defined_id(inst_id));
return _inst_info_table[inst_id];
}
} // {InstDB}
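Usage sketch, also surfacing the behavioral part of this commit (the Casp family gaining the consecutive-registers flag), assuming F(Consecutive) in the table expands to kInstFlagConsecutive:

const InstDB::InstInfo& info = InstDB::inst_info_by_id(Inst::kIdCasp);
bool needs_pair = info.has_flag(InstDB::kInstFlagConsecutive);  // now true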

View File

@@ -277,70 +277,70 @@ struct BaseOpX16 {
struct BaseOpImm {
uint32_t opcode;
uint16_t immBits;
uint16_t immOffset;
uint16_t imm_bits;
uint16_t imm_offset;
};
struct BaseR {
uint32_t opcode;
uint32_t rType : 8;
uint32_t rHiId : 8;
uint32_t rShift : 8;
uint32_t reg_type : 8;
uint32_t reg_hi_id : 8;
uint32_t r_shift : 8;
};
struct BaseRR {
uint32_t opcode;
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t aShift : 5;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t bShift : 5;
uint32_t a_type : 2;
uint32_t a_hi_id : 6;
uint32_t a_shift : 5;
uint32_t b_type : 2;
uint32_t b_hi_id : 6;
uint32_t b_shift : 5;
uint32_t uniform : 1;
};
struct BaseRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t a_type : 2;
uint32_t a_hi_id : 6;
uint32_t b_type : 2;
uint32_t b_hi_id : 6;
uint32_t c_type : 2;
uint32_t c_hi_id : 6;
uint32_t uniform : 1;
};
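A plausible reading of these packed fields, matching the Irg row quoted earlier - (opcode, kX, kSP, kX, kSP, kX, kZR, true):

//   a_type=kX, a_hi_id=kSP  -> Xd, where register id 31 means SP
//   b_type=kX, b_hi_id=kSP  -> Xn|SP
//   c_type=kX, c_hi_id=kZR  -> Xm, where register id 31 means XZR
//   uniform=true            -> all operands must share one width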
struct BaseRRRR {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t cType : 2;
uint32_t cHiId : 6;
uint32_t dType : 2;
uint32_t dHiId : 6;
uint32_t a_type : 2;
uint32_t a_hi_id : 6;
uint32_t b_type : 2;
uint32_t b_hi_id : 6;
uint32_t c_type : 2;
uint32_t c_hi_id : 6;
uint32_t d_type : 2;
uint32_t d_hi_id : 6;
uint32_t uniform : 1;
};
struct BaseRRII {
M_OPCODE(opcode, 22)
uint32_t aType : 2;
uint32_t aHiId : 6;
uint32_t bType : 2;
uint32_t bHiId : 6;
uint32_t aImmSize : 6;
uint32_t aImmDiscardLsb : 5;
uint32_t aImmOffset : 5;
uint32_t bImmSize : 6;
uint32_t bImmDiscardLsb : 5;
uint32_t bImmOffset : 5;
uint32_t a_type : 2;
uint32_t a_hi_id : 6;
uint32_t b_type : 2;
uint32_t b_hi_id : 6;
uint32_t a_imm_size : 6;
uint32_t a_imm_discard_lsb : 5;
uint32_t a_imm_offset : 5;
uint32_t b_imm_size : 6;
uint32_t b_imm_discard_lsb : 5;
uint32_t b_imm_offset : 5;
};
struct BaseAtDcIcTlbi {
uint32_t immVerifyMask : 14;
uint32_t immVerifyData : 14;
uint32_t mandatoryReg : 1;
uint32_t imm_verify_mask : 14;
uint32_t imm_verify_data : 14;
uint32_t mandatory_reg : 1;
};
struct BaseAdcSbc {
@@ -348,19 +348,19 @@ struct BaseAdcSbc {
};
struct BaseMinMax {
uint32_t regOp;
uint32_t immOp;
uint32_t register_op;
uint32_t immediate_op;
};
struct BaseAddSub {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|Rd|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|Rd|
uint32_t shifted_op : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t extended_op : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|Rd|
uint32_t immediate_op: 10; // sf|.......|Sh| Imm:12 |Rn|Rd|
};
struct BaseAdr {
M_OPCODE(opcode, 22)
OffsetType offsetType : 8;
OffsetType offset_type : 8;
};
struct BaseBfm {
@@ -368,21 +368,21 @@ struct BaseBfm {
};
struct BaseCmpCmn {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t extendedOp : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|11111|
uint32_t immediateOp: 10; // sf|.......|Sh| Imm:12 |Rn|11111|
uint32_t shifted_op : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t extended_op : 10; // sf|.......|..|.|Rm|Opt|Imm3|Rn|11111|
uint32_t immediate_op: 10; // sf|.......|Sh| Imm:12 |Rn|11111|
};
struct BaseExtend {
M_OPCODE(opcode, 22) // sf|........|N|......|......|Rn|Rd|
uint32_t rType : 2;
uint32_t reg_type : 2;
uint32_t u : 1;
};
struct BaseLogical {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
uint32_t negateImm : 1 ; // True if this is an operation that must negate IMM.
uint32_t shifted_op : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|Rd|
uint32_t immediate_op: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|Rd|
uint32_t negate_imm : 1; // True if this is an operation that must negate IMM.
};
struct BaseMvnNeg {
@@ -390,101 +390,101 @@ struct BaseMvnNeg {
};
struct BaseShift {
M_OPCODE(registerOp, 22)
M_OPCODE(immediateOp, 22)
M_OPCODE(register_op, 22)
M_OPCODE(immediate_op, 22)
uint32_t ror : 2;
};
struct BaseTst {
uint32_t shiftedOp : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t immediateOp: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|11111|
uint32_t shifted_op : 10; // sf|.......|Sh|.|Rm| Imm:6 |Rn|11111|
uint32_t immediate_op: 10; // sf|........|N|ImmR:6|ImmS:6|Rn|11111|
};
struct BaseRM_NoImm {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t reg_type : 2;
uint32_t reg_hi_id : 6;
uint32_t x_offset : 5;
};
struct BaseRM_SImm9 {
M_OPCODE(offsetOp, 22)
M_OPCODE(prePostOp, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
M_OPCODE(offset_op, 22)
M_OPCODE(pre_post_op, 22)
uint32_t reg_type : 2;
uint32_t reg_hi_id : 6;
uint32_t x_offset : 5;
uint32_t imm_shift : 4;
};
struct BaseRM_SImm10 {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t rHiId : 6;
uint32_t xOffset : 5;
uint32_t immShift : 4;
uint32_t reg_type : 2;
uint32_t reg_hi_id : 6;
uint32_t x_offset : 5;
uint32_t imm_shift : 4;
};
struct BasePrfm {
uint32_t registerOp : 11;
uint32_t sOffsetOp : 10;
uint32_t uOffsetOp : 11;
uint32_t literalOp;
uint32_t register_op : 11;
uint32_t s_offset_op : 10;
uint32_t u_offset_op : 11;
uint32_t literal_op;
};
struct BaseLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t uOffsetShift : 3;
uint32_t uAltInstId : 14;
uint32_t u_offset_op : 10;
uint32_t pre_post_op : 11;
uint32_t register_op : 11;
uint32_t literal_op : 8;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
uint32_t u_offset_shift : 3;
uint32_t u_alt_inst_id : 14;
};
struct BaseLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t offsetShift : 3;
uint32_t offset_op : 10;
uint32_t pre_post_op : 10;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
uint32_t offset_shift : 3;
};
struct BaseStx {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
};
struct BaseLdxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
};
struct BaseStxp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
};
struct BaseAtomicOp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
uint32_t zr : 1;
};
struct BaseAtomicSt {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
};
struct BaseAtomicCasp {
M_OPCODE(opcode, 22)
uint32_t rType : 2;
uint32_t xOffset : 5;
uint32_t reg_type : 2;
uint32_t x_offset : 5;
};
using BaseBranchReg = BaseOp;
@@ -503,15 +503,15 @@ using BaseMovKNZ = BaseOp;
using BaseMull = BaseOp;
struct FSimdGeneric {
uint32_t _scalarOp : 28;
uint32_t _scalar_op : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp : 28;
uint32_t _vector_op : 28;
uint32_t _vectorHf : 4;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vectorHf() const noexcept { return uint32_t(_vectorHf); }
constexpr uint32_t scalar_op() const noexcept { return uint32_t(_scalar_op) << 10; }
constexpr uint32_t vector_op() const noexcept { return uint32_t(_vector_op) << 10; }
constexpr uint32_t scalar_hf() const noexcept { return uint32_t(_scalarHf); }
constexpr uint32_t vector_hf() const noexcept { return uint32_t(_vectorHf); }
};
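The stored opcodes are pre-shifted right by 10 because bits 0-4 (Rd) and 5-9 (Rn) of AArch64 data-processing encodings are operand fields, so a 28-bit template plus a 4-bit half-float selector fits one uint32_t. The (0x5u << 28) seen in the scalar variants below appears to turn the 0x0E... vector encoding class into its 0x5E... scalar twin:

// assembled = vector_op() | (rd << 0) | (rn << 5);   -- sketch of the final OR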
using FSimdVV = FSimdGeneric;
@@ -523,17 +523,17 @@ struct FSimdSV {
};
struct FSimdVVVe {
uint32_t _scalarOp : 28;
uint32_t _scalar_op : 28;
uint32_t _scalarHf : 4;
uint32_t _vectorOp;
uint32_t _vector_op;
uint32_t _elementOp;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t scalarHf() const noexcept { return uint32_t(_scalarHf); };
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t vectorHf() const noexcept { return kHF_C; }
constexpr uint32_t elementScalarOp() const noexcept { return (uint32_t(_elementOp) << 10) | (0x5u << 28); }
constexpr uint32_t elementVectorOp() const noexcept { return (uint32_t(_elementOp) << 10); }
constexpr uint32_t scalar_op() const noexcept { return uint32_t(_scalar_op) << 10; }
constexpr uint32_t scalar_hf() const noexcept { return uint32_t(_scalarHf); };
constexpr uint32_t vector_op() const noexcept { return uint32_t(_vector_op) << 10; }
constexpr uint32_t vector_hf() const noexcept { return kHF_C; }
constexpr uint32_t element_scalar_op() const noexcept { return (uint32_t(_elementOp) << 10) | (0x5u << 28); }
constexpr uint32_t element_vector_op() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFcadd {
@@ -546,8 +546,8 @@ struct SimdFcmla {
uint32_t _regularOp;
uint32_t _elementOp;
constexpr uint32_t regularOp() const noexcept { return uint32_t(_regularOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return (uint32_t(_elementOp) << 10); }
constexpr uint32_t regular_op() const noexcept { return uint32_t(_regularOp) << 10; }
constexpr uint32_t element_op() const noexcept { return (uint32_t(_elementOp) << 10); }
};
struct SimdFccmpFccmpe {
@@ -559,18 +559,18 @@ struct SimdFcm {
uint32_t _registerOp : 28;
uint32_t _registerHf : 4;
uint32_t _zeroOp : 28;
uint32_t _zero_op : 28;
constexpr bool hasRegisterOp() const noexcept { return _registerOp != 0; }
constexpr bool hasZeroOp() const noexcept { return _zeroOp != 0; }
constexpr bool has_register_op() const noexcept { return _registerOp != 0; }
constexpr bool has_zero_op() const noexcept { return _zero_op != 0; }
constexpr uint32_t registerScalarOp() const noexcept { return (uint32_t(_registerOp) << 10) | (0x5u << 28); }
constexpr uint32_t registerVectorOp() const noexcept { return uint32_t(_registerOp) << 10; }
constexpr uint32_t registerScalarHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t registerVectorHf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t register_scalar_op() const noexcept { return (uint32_t(_registerOp) << 10) | (0x5u << 28); }
constexpr uint32_t register_vector_op() const noexcept { return uint32_t(_registerOp) << 10; }
constexpr uint32_t register_scalar_hf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t register_vector_hf() const noexcept { return uint32_t(_registerHf); }
constexpr uint32_t zeroScalarOp() const noexcept { return (uint32_t(_zeroOp) << 10) | (0x5u << 28); }
constexpr uint32_t zeroVectorOp() const noexcept { return (uint32_t(_zeroOp) << 10); }
constexpr uint32_t zero_scalar_op() const noexcept { return (uint32_t(_zero_op) << 10) | (0x5u << 28); }
constexpr uint32_t zero_vector_op() const noexcept { return (uint32_t(_zero_op) << 10); }
};
struct SimdFcmpFcmpe {
@@ -580,130 +580,130 @@ struct SimdFcmpFcmpe {
struct SimdFcvtLN {
uint32_t _opcode : 22;
uint32_t _isCvtxn : 1;
uint32_t _hasScalar : 1;
uint32_t _is_cvtxn : 1;
uint32_t _has_scalar : 1;
constexpr uint32_t scalarOp() const noexcept { return (uint32_t(_opcode) << 10) | (0x5u << 28); }
constexpr uint32_t vectorOp() const noexcept { return (uint32_t(_opcode) << 10); }
constexpr uint32_t scalar_op() const noexcept { return (uint32_t(_opcode) << 10) | (0x5u << 28); }
constexpr uint32_t vector_op() const noexcept { return (uint32_t(_opcode) << 10); }
constexpr uint32_t isCvtxn() const noexcept { return _isCvtxn; }
constexpr uint32_t hasScalar() const noexcept { return _hasScalar; }
constexpr uint32_t is_cvtxn() const noexcept { return _is_cvtxn; }
constexpr uint32_t has_scalar() const noexcept { return _has_scalar; }
};
struct SimdFcvtSV {
uint32_t _vectorIntOp;
uint32_t _vectorFpOp;
uint32_t _generalOp : 31;
uint32_t _general_op : 31;
uint32_t _isFloatToInt : 1;
constexpr uint32_t scalarIntOp() const noexcept { return (uint32_t(_vectorIntOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorIntOp() const noexcept { return uint32_t(_vectorIntOp) << 10; }
constexpr uint32_t scalarFpOp() const noexcept { return (uint32_t(_vectorFpOp) << 10) | (0x5u << 28); }
constexpr uint32_t vectorFpOp() const noexcept { return uint32_t(_vectorFpOp) << 10; }
constexpr uint32_t generalOp() const noexcept { return (uint32_t(_generalOp) << 10); }
constexpr uint32_t scalar_int_op() const noexcept { return (uint32_t(_vectorIntOp) << 10) | (0x5u << 28); }
constexpr uint32_t vector_int_op() const noexcept { return uint32_t(_vectorIntOp) << 10; }
constexpr uint32_t scalar_fp_op() const noexcept { return (uint32_t(_vectorFpOp) << 10) | (0x5u << 28); }
constexpr uint32_t vector_fp_op() const noexcept { return uint32_t(_vectorFpOp) << 10; }
constexpr uint32_t general_op() const noexcept { return (uint32_t(_general_op) << 10); }
constexpr uint32_t isFloatToInt() const noexcept { return _isFloatToInt; }
constexpr uint32_t isFixedPoint() const noexcept { return _vectorFpOp != 0; }
constexpr uint32_t is_float_to_int() const noexcept { return _isFloatToInt; }
constexpr uint32_t is_fixed_point() const noexcept { return _vectorFpOp != 0; }
};
struct SimdFmlal {
uint32_t _vectorOp;
uint32_t _vector_op;
uint32_t _elementOp;
uint8_t _optionalQ;
uint8_t tA;
uint8_t tB;
uint8_t ta;
uint8_t tb;
uint8_t tElement;
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t elementOp() const noexcept { return uint32_t(_elementOp) << 10; }
constexpr uint32_t optionalQ() const noexcept { return _optionalQ; }
constexpr uint32_t vector_op() const noexcept { return uint32_t(_vector_op) << 10; }
constexpr uint32_t element_op() const noexcept { return uint32_t(_elementOp) << 10; }
constexpr uint32_t optional_q() const noexcept { return _optionalQ; }
};
struct FSimdPair {
uint32_t _scalarOp;
uint32_t _vectorOp;
uint32_t _scalar_op;
uint32_t _vector_op;
constexpr uint32_t scalarOp() const noexcept { return uint32_t(_scalarOp) << 10; }
constexpr uint32_t vectorOp() const noexcept { return uint32_t(_vectorOp) << 10; }
constexpr uint32_t scalar_op() const noexcept { return uint32_t(_scalar_op) << 10; }
constexpr uint32_t vector_op() const noexcept { return uint32_t(_vector_op) << 10; }
};
struct ISimdVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t vec_op_type : 6;
};
struct ISimdVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op0_signature;
uint32_t op1_signature;
};
struct ISimdSV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t vec_op_type : 6;
};
struct ISimdVVV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t vec_op_type : 6;
};
struct ISimdVVVx {
M_OPCODE(opcode, 22)
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
uint32_t op0_signature;
uint32_t op1_signature;
uint32_t op2_signature;
};
struct ISimdWWV {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t vec_op_type : 6;
};
struct ISimdVVVe {
uint32_t regularOp : 26; // 22 bits used.
uint32_t regularVecType : 6;
uint32_t elementOp : 26; // 22 bits used.
uint32_t elementVecType : 6;
uint32_t regular_op : 26; // 22 bits used.
uint32_t regular_vec_type : 6;
uint32_t element_op : 26; // 22 bits used.
uint32_t element_vec_type : 6;
};
struct ISimdVVVI {
M_OPCODE(opcode, 22)
uint32_t vecOpType : 6;
uint32_t immSize : 4;
uint32_t immShift : 4;
uint32_t imm64HasOneBitLess : 1;
uint32_t vec_op_type : 6;
uint32_t imm_size : 4;
uint32_t imm_shift : 4;
uint32_t imm64_has_one_bit_less : 1;
};
struct ISimdVVVV {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
uint32_t vec_op_type : 6;
};
struct ISimdVVVVx {
uint32_t opcode;
uint32_t op0Signature;
uint32_t op1Signature;
uint32_t op2Signature;
uint32_t op3Signature;
uint32_t op0_signature;
uint32_t op1_signature;
uint32_t op2_signature;
uint32_t op3_signature;
};
struct SimdBicOrr {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp; // 22 bits used.
uint32_t register_op; // 22 bits used.
uint32_t immediate_op; // 22 bits used.
};
struct SimdCmp {
uint32_t regOp;
uint32_t zeroOp : 22;
uint32_t vecOpType : 6;
uint32_t register_op;
uint32_t zero_op : 22;
uint32_t vec_op_type : 6;
};
struct SimdDot {
uint32_t vectorOp; // 22 bits used.
uint32_t elementOp; // 22 bits used.
uint8_t tA; // Element-type of the first operand.
uint8_t tB; // Element-type of the second and third operands.
uint32_t vector_op; // 22 bits used.
uint32_t element_op; // 22 bits used.
uint8_t ta; // Element-type of the first operand.
uint8_t tb; // Element-type of the second and third operands.
uint8_t tElement; // Element-type of the element index[] operand.
};
@@ -713,23 +713,23 @@ struct SimdMoviMvni {
};
struct SimdLdSt {
uint32_t uOffsetOp : 10;
uint32_t prePostOp : 11;
uint32_t registerOp : 11;
uint32_t literalOp : 8;
uint32_t uAltInstId : 16;
uint32_t u_offset_op : 10;
uint32_t pre_post_op : 11;
uint32_t register_op : 11;
uint32_t literal_op : 8;
uint32_t u_alt_inst_id : 16;
};
struct SimdLdNStN {
uint32_t singleOp;
uint32_t multipleOp : 22;
uint32_t single_op;
uint32_t multiple_op : 22;
uint32_t n : 3;
uint32_t replicate : 1;
};
struct SimdLdpStp {
uint32_t offsetOp : 10;
uint32_t prePostOp : 10;
uint32_t offset_op : 10;
uint32_t pre_post_op : 10;
};
struct SimdLdurStur {
@@ -739,19 +739,19 @@ struct SimdLdurStur {
struct ISimdPair {
uint32_t opcode2; // 22 bits used.
uint32_t opcode3 : 26; // 22 bits used.
uint32_t opType3 : 6;
uint32_t op_type3 : 6;
};
struct SimdShift {
uint32_t registerOp; // 22 bits used.
uint32_t immediateOp : 22; // 22 bits used.
uint32_t invertedImm : 1;
uint32_t vecOpType : 6;
uint32_t register_op; // 22 bits used.
uint32_t immediate_op : 22; // 22 bits used.
uint32_t inverted_imm : 1;
uint32_t vec_op_type : 6;
};
struct SimdShiftES {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
uint32_t vec_op_type : 6;
};
struct SimdSm3tt {
@@ -760,13 +760,13 @@ struct SimdSm3tt {
struct SimdSmovUmov {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
uint32_t isSigned : 1;
uint32_t vec_op_type : 6;
uint32_t is_signed : 1;
};
struct SimdSxtlUxtl {
uint32_t opcode : 22;
uint32_t vecOpType : 6;
uint32_t vec_op_type : 6;
};
struct SimdTblTbx {
@@ -869,9 +869,9 @@ extern const SimdTblTbx simdTblTbx[2];
// ====================
#ifndef ASMJIT_NO_TEXT
extern const InstNameIndex instNameIndex;
extern const char _instNameStringTable[];
extern const uint32_t _instNameIndexTable[];
extern const InstNameIndex _inst_name_index;
extern const char _inst_name_string_table[];
extern const uint32_t _inst_name_index_table[];
#endif // !ASMJIT_NO_TEXT
} // {InstDB}

View File

@@ -21,9 +21,9 @@ UNIT(a64_operand) {
EXPECT_EQ(x(5), x5);
INFO("Checking Gp register properties");
EXPECT_TRUE(Gp().isReg());
EXPECT_TRUE(w0.isReg());
EXPECT_TRUE(x0.isReg());
EXPECT_TRUE(Gp().is_reg());
EXPECT_TRUE(w0.is_reg());
EXPECT_TRUE(x0.is_reg());
EXPECT_EQ(w0.id(), 0u);
EXPECT_EQ(x0.id(), 0u);
EXPECT_EQ(wzr.id(), Gp::kIdZr);
@@ -32,51 +32,51 @@ UNIT(a64_operand) {
EXPECT_EQ(sp.id(), Gp::kIdSp);
EXPECT_EQ(w0.size(), 4u);
EXPECT_EQ(x0.size(), 8u);
EXPECT_EQ(w0.regType(), RegType::kGp32);
EXPECT_EQ(x0.regType(), RegType::kGp64);
EXPECT_EQ(w0.regGroup(), RegGroup::kGp);
EXPECT_EQ(x0.regGroup(), RegGroup::kGp);
EXPECT_EQ(w0.reg_type(), RegType::kGp32);
EXPECT_EQ(x0.reg_type(), RegType::kGp64);
EXPECT_EQ(w0.reg_group(), RegGroup::kGp);
EXPECT_EQ(x0.reg_group(), RegGroup::kGp);
INFO("Checking Vec register properties");
EXPECT_EQ(v0.regType(), RegType::kVec128);
EXPECT_EQ(d0.regType(), RegType::kVec64);
EXPECT_EQ(s0.regType(), RegType::kVec32);
EXPECT_EQ(h0.regType(), RegType::kVec16);
EXPECT_EQ(b0.regType(), RegType::kVec8);
EXPECT_EQ(v0.reg_type(), RegType::kVec128);
EXPECT_EQ(d0.reg_type(), RegType::kVec64);
EXPECT_EQ(s0.reg_type(), RegType::kVec32);
EXPECT_EQ(h0.reg_type(), RegType::kVec16);
EXPECT_EQ(b0.reg_type(), RegType::kVec8);
EXPECT_EQ(v0.regGroup(), RegGroup::kVec);
EXPECT_EQ(d0.regGroup(), RegGroup::kVec);
EXPECT_EQ(s0.regGroup(), RegGroup::kVec);
EXPECT_EQ(h0.regGroup(), RegGroup::kVec);
EXPECT_EQ(b0.regGroup(), RegGroup::kVec);
EXPECT_EQ(v0.reg_group(), RegGroup::kVec);
EXPECT_EQ(d0.reg_group(), RegGroup::kVec);
EXPECT_EQ(s0.reg_group(), RegGroup::kVec);
EXPECT_EQ(h0.reg_group(), RegGroup::kVec);
EXPECT_EQ(b0.reg_group(), RegGroup::kVec);
INFO("Checking Vec register element[] access");
Vec vd_1 = v15.d(1);
EXPECT_EQ(vd_1.regType(), RegType::kVec128);
EXPECT_EQ(vd_1.regGroup(), RegGroup::kVec);
EXPECT_EQ(vd_1.reg_type(), RegType::kVec128);
EXPECT_EQ(vd_1.reg_group(), RegGroup::kVec);
EXPECT_EQ(vd_1.id(), 15u);
EXPECT_TRUE(vd_1.isVecD2());
EXPECT_EQ(vd_1.elementType(), VecElementType::kD);
EXPECT_TRUE(vd_1.hasElementIndex());
EXPECT_EQ(vd_1.elementIndex(), 1u);
EXPECT_TRUE(vd_1.is_vec_d2());
EXPECT_EQ(vd_1.element_type(), VecElementType::kD);
EXPECT_TRUE(vd_1.has_element_index());
EXPECT_EQ(vd_1.element_index(), 1u);
Vec vs_3 = v15.s(3);
EXPECT_EQ(vs_3.regType(), RegType::kVec128);
EXPECT_EQ(vs_3.regGroup(), RegGroup::kVec);
EXPECT_EQ(vs_3.reg_type(), RegType::kVec128);
EXPECT_EQ(vs_3.reg_group(), RegGroup::kVec);
EXPECT_EQ(vs_3.id(), 15u);
EXPECT_TRUE(vs_3.isVecS4());
EXPECT_EQ(vs_3.elementType(), VecElementType::kS);
EXPECT_TRUE(vs_3.hasElementIndex());
EXPECT_EQ(vs_3.elementIndex(), 3u);
EXPECT_TRUE(vs_3.is_vec_s4());
EXPECT_EQ(vs_3.element_type(), VecElementType::kS);
EXPECT_TRUE(vs_3.has_element_index());
EXPECT_EQ(vs_3.element_index(), 3u);
Vec vb_4 = v15.b4(3);
EXPECT_EQ(vb_4.regType(), RegType::kVec128);
EXPECT_EQ(vb_4.regGroup(), RegGroup::kVec);
EXPECT_EQ(vb_4.reg_type(), RegType::kVec128);
EXPECT_EQ(vb_4.reg_group(), RegGroup::kVec);
EXPECT_EQ(vb_4.id(), 15u);
EXPECT_TRUE(vb_4.isVecB4x4());
EXPECT_EQ(vb_4.elementType(), VecElementType::kB4);
EXPECT_TRUE(vb_4.hasElementIndex());
EXPECT_EQ(vb_4.elementIndex(), 3u);
EXPECT_TRUE(vb_4.is_vec_b4x4());
EXPECT_EQ(vb_4.element_type(), VecElementType::kB4);
EXPECT_TRUE(vb_4.has_element_index());
EXPECT_EQ(vb_4.element_index(), 3u);
}
#endif

View File

@@ -48,21 +48,21 @@ public:
//! \name Static Constructors
//! \{
//! Creates a new 32-bit low general purpose register (W) having the given register id `regId`.
//! Creates a new 32-bit low general purpose register (W) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Gp make_r32(uint32_t regId) noexcept { return Gp(_signatureOf<RegType::kGp32>(), regId); }
static ASMJIT_INLINE_CONSTEXPR Gp make_r32(uint32_t reg_id) noexcept { return Gp(signature_of_t<RegType::kGp32>(), reg_id); }
//! Creates a new 64-bit low general purpose register (X) having the given register id `regId`.
//! Creates a new 64-bit low general purpose register (X) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Gp make_r64(uint32_t regId) noexcept { return Gp(_signatureOf<RegType::kGp64>(), regId); }
static ASMJIT_INLINE_CONSTEXPR Gp make_r64(uint32_t reg_id) noexcept { return Gp(signature_of_t<RegType::kGp64>(), reg_id); }
//! Creates a new 32-bit low general purpose register (W) having the given register id `regId`.
//! Creates a new 32-bit low general purpose register (W) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Gp make_w(uint32_t regId) noexcept { return make_r32(regId); }
static ASMJIT_INLINE_CONSTEXPR Gp make_w(uint32_t reg_id) noexcept { return make_r32(reg_id); }
//! Creates a new 64-bit low general purpose register (X) having the given register id `regId`.
//! Creates a new 64-bit low general purpose register (X) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Gp make_x(uint32_t regId) noexcept { return make_r64(regId); }
static ASMJIT_INLINE_CONSTEXPR Gp make_x(uint32_t reg_id) noexcept { return make_r64(reg_id); }
//! \}
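A sketch tying these to the a64_operand test earlier - make_w() and make_x() differ only in signature, the register id is shared (unqualified names assume the a64 namespace):

Gp w5_reg = Gp::make_w(5);   // 32-bit view, compares equal to w5
Gp x5_reg = Gp::make_x(5);   // 64-bit view, compares equal to x5
// w5_reg.id() == x5_reg.id() == 5u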
@@ -71,11 +71,11 @@ public:
//! Test whether this register is ZR register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isZR() const noexcept { return id() == kIdZr; }
ASMJIT_INLINE_CONSTEXPR bool is_zr() const noexcept { return id() == kIdZr; }
//! Test whether this register is SP register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isSP() const noexcept { return id() == kIdSp; }
ASMJIT_INLINE_CONSTEXPR bool is_sp() const noexcept { return id() == kIdSp; }
//! Clones and casts this register to a 32-bit (W) register.
[[nodiscard]]
@@ -151,12 +151,12 @@ public:
static inline constexpr uint32_t kSignatureElementH2 = uint32_t(VecElementType::kH2) << kSignatureRegElementTypeShift;
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR OperandSignature _makeElementAccessSignature(VecElementType elementType, uint32_t elementIndex) noexcept {
static ASMJIT_INLINE_CONSTEXPR OperandSignature _make_element_access_signature(VecElementType element_type, uint32_t element_index) noexcept {
return OperandSignature{
uint32_t(RegTraits<RegType::kVec128>::kSignature) |
uint32_t(kSignatureRegElementFlagMask) |
(uint32_t(elementType) << kSignatureRegElementTypeShift) |
(uint32_t(elementIndex << kSignatureRegElementIndexShift))
(uint32_t(element_type) << kSignatureRegElementTypeShift) |
(uint32_t(element_index << kSignatureRegElementIndexShift))
};
}
@@ -165,71 +165,71 @@ public:
//! \name Static Constructors
//! \{
//! Creates a new 8-bit vector register (B) having the given register id `regId`.
//! Creates a new 8-bit vector register (B) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v8(uint32_t regId) noexcept { return Vec(_signatureOf<RegType::kVec8>(), regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_v8(uint32_t reg_id) noexcept { return Vec(signature_of_t<RegType::kVec8>(), reg_id); }
//! Creates a new 16-bit vector register (H) having the given register id `regId`.
//! Creates a new 16-bit vector register (H) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v16(uint32_t regId) noexcept { return Vec(_signatureOf<RegType::kVec16>(), regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_v16(uint32_t reg_id) noexcept { return Vec(signature_of_t<RegType::kVec16>(), reg_id); }
//! Creates a new 32-bit vector register (S) having the given register id `regId`.
//! Creates a new 32-bit vector register (S) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v32(uint32_t regId) noexcept { return Vec(_signatureOf<RegType::kVec32>(), regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_v32(uint32_t reg_id) noexcept { return Vec(signature_of_t<RegType::kVec32>(), reg_id); }
//! Creates a new 64-bit vector register (D) having the given register id `regId`.
//! Creates a new 64-bit vector register (D) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v64(uint32_t regId) noexcept { return Vec(_signatureOf<RegType::kVec64>(), regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_v64(uint32_t reg_id) noexcept { return Vec(signature_of_t<RegType::kVec64>(), reg_id); }
//! Creates a new 128-bit vector register (Q) having the given register id `regId`.
//! Creates a new 128-bit vector register (Q) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v128(uint32_t regId) noexcept { return Vec(_signatureOf<RegType::kVec128>(), regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_v128(uint32_t reg_id) noexcept { return Vec(signature_of_t<RegType::kVec128>(), reg_id); }
//! Creates a new 8-bit vector register (B) having the given register id `regId`.
//! Creates a new 8-bit vector register (B) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_b(uint32_t regId) noexcept { return make_v8(regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_b(uint32_t reg_id) noexcept { return make_v8(reg_id); }
//! Creates a new 16-bit vector register (H) having the given register id `regId`.
//! Creates a new 16-bit vector register (H) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_h(uint32_t regId) noexcept { return make_v16(regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_h(uint32_t reg_id) noexcept { return make_v16(reg_id); }
//! Creates a new 32-bit vector register (S) having the given register id `regId`.
//! Creates a new 32-bit vector register (S) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_s(uint32_t regId) noexcept { return make_v32(regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_s(uint32_t reg_id) noexcept { return make_v32(reg_id); }
//! Creates a new 64-bit vector register (D) having the given register id `regId`.
//! Creates a new 64-bit vector register (D) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_d(uint32_t regId) noexcept { return make_v64(regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_d(uint32_t reg_id) noexcept { return make_v64(reg_id); }
//! Creates a new 128-bit vector register (Q) having the given register id `regId`.
//! Creates a new 128-bit vector register (Q) having the given register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_q(uint32_t regId) noexcept { return make_v128(regId); }
static ASMJIT_INLINE_CONSTEXPR Vec make_q(uint32_t reg_id) noexcept { return make_v128(reg_id); }
//! Creates a new 32-bit vector register (S) having the given vector `elementType` and register id `regId`.
//! Creates a new 32-bit vector register (S) having the given vector `element_type` and register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v32_with_element_type(VecElementType elementType, uint32_t regId) noexcept {
uint32_t signature = RegTraits<RegType::kVec32>::kSignature | uint32_t(elementType) << kSignatureRegElementTypeShift;
return Vec(OperandSignature{signature}, regId);
static ASMJIT_INLINE_CONSTEXPR Vec make_v32_with_element_type(VecElementType element_type, uint32_t reg_id) noexcept {
uint32_t signature = RegTraits<RegType::kVec32>::kSignature | uint32_t(element_type) << kSignatureRegElementTypeShift;
return Vec(OperandSignature{signature}, reg_id);
}
//! Creates a new 64-bit vector register (D) having the given vector `elementType` and register id `regId`.
//! Creates a new 64-bit vector register (D) having the given vector `element_type` and register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v64_with_element_type(VecElementType elementType, uint32_t regId) noexcept {
uint32_t signature = RegTraits<RegType::kVec64>::kSignature | uint32_t(elementType) << kSignatureRegElementTypeShift;
return Vec(OperandSignature{signature}, regId);
static ASMJIT_INLINE_CONSTEXPR Vec make_v64_with_element_type(VecElementType element_type, uint32_t reg_id) noexcept {
uint32_t signature = RegTraits<RegType::kVec64>::kSignature | uint32_t(element_type) << kSignatureRegElementTypeShift;
return Vec(OperandSignature{signature}, reg_id);
}
//! Creates a new 128-bit vector register (Q) having the given vector `elementType` and register id `regId`.
//! Creates a new 128-bit vector register (Q) having the given vector `element_type` and register id `reg_id`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v128_with_element_type(VecElementType elementType, uint32_t regId) noexcept {
uint32_t signature = RegTraits<RegType::kVec128>::kSignature | uint32_t(elementType) << kSignatureRegElementTypeShift;
return Vec(OperandSignature{signature}, regId);
static ASMJIT_INLINE_CONSTEXPR Vec make_v128_with_element_type(VecElementType element_type, uint32_t reg_id) noexcept {
uint32_t signature = RegTraits<RegType::kVec128>::kSignature | uint32_t(element_type) << kSignatureRegElementTypeShift;
return Vec(OperandSignature{signature}, reg_id);
}
//! Creates a new 128-bit vector of type specified by `elementType` and `elementIndex`.
//! Creates a new 128-bit vector of type specified by `element_type` and `element_index`.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Vec make_v128_with_element_index(VecElementType elementType, uint32_t elementIndex, uint32_t regId) noexcept {
return Vec(_makeElementAccessSignature(elementType, elementIndex), regId);
static ASMJIT_INLINE_CONSTEXPR Vec make_v128_with_element_index(VecElementType element_type, uint32_t element_index, uint32_t reg_id) noexcept {
return Vec(_make_element_access_signature(element_type, element_index), reg_id);
}
//! \}
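Sketch: building an element access directly vs. through the accessors defined further below; both forms satisfy the assertions from the a64_operand test:

Vec lane_a = Vec::make_v128_with_element_index(VecElementType::kS, 3, 15);
Vec lane_b = v15.s(3);   // same V15.S[3]
// lane_a.is_vec_s4() && lane_a.element_type() == VecElementType::kS
// && lane_a.element_index() == 3u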
@@ -239,57 +239,57 @@ public:
//! Returns whether the register has element type or element index (or both).
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool hasElementTypeOrIndex() const noexcept {
return _signature.hasField<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>();
ASMJIT_INLINE_CONSTEXPR bool has_element_type_or_index() const noexcept {
return _signature.has_field<kSignatureRegElementTypeMask | kSignatureRegElementFlagMask>();
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecB8() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_b8() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec64>::kSignature | kSignatureElementB);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecH4() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_h4() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec64>::kSignature | kSignatureElementH);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecS2() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_s2() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec64>::kSignature | kSignatureElementS);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecD1() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_d1() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec64>::kSignature);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecB16() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_b16() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec128>::kSignature | kSignatureElementB);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecH8() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_h8() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec128>::kSignature | kSignatureElementH);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecS4() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_s4() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec128>::kSignature | kSignatureElementS);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecD2() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_d2() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec128>::kSignature | kSignatureElementD);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecB4x4() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_b4x4() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec128>::kSignature | kSignatureElementB4);
}
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool isVecH2x4() const noexcept {
ASMJIT_INLINE_CONSTEXPR bool is_vec_h2x4() const noexcept {
return _signature.subset(kBaseSignatureMask | kSignatureRegElementTypeMask) == (RegTraits<RegType::kVec128>::kSignature | kSignatureElementH2);
}
@@ -333,29 +333,29 @@ public:
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec q() const noexcept { return make_v128(id()); }
//! Clones and casts the register to a 128-bit V.B[elementIndex] register.
//! Clones and casts the register to a 128-bit V.B[element_index] register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec b(uint32_t elementIndex) const noexcept { return make_v128_with_element_index(VecElementType::kB, elementIndex, id()); }
ASMJIT_INLINE_CONSTEXPR Vec b(uint32_t element_index) const noexcept { return make_v128_with_element_index(VecElementType::kB, element_index, id()); }
//! Clones and casts the register to a 128-bit V.H[elementIndex] register.
//! Clones and casts the register to a 128-bit V.H[element_index] register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec h(uint32_t elementIndex) const noexcept { return make_v128_with_element_index(VecElementType::kH, elementIndex, id()); }
ASMJIT_INLINE_CONSTEXPR Vec h(uint32_t element_index) const noexcept { return make_v128_with_element_index(VecElementType::kH, element_index, id()); }
//! Clones and casts the register to a 128-bit V.S[elementIndex] register.
//! Clones and casts the register to a 128-bit V.S[element_index] register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec s(uint32_t elementIndex) const noexcept { return make_v128_with_element_index(VecElementType::kS, elementIndex, id()); }
ASMJIT_INLINE_CONSTEXPR Vec s(uint32_t element_index) const noexcept { return make_v128_with_element_index(VecElementType::kS, element_index, id()); }
//! Clones and casts the register to a 128-bit V.D[elementIndex] register.
//! Clones and casts the register to a 128-bit V.D[element_index] register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec d(uint32_t elementIndex) const noexcept { return make_v128_with_element_index(VecElementType::kD, elementIndex, id()); }
ASMJIT_INLINE_CONSTEXPR Vec d(uint32_t element_index) const noexcept { return make_v128_with_element_index(VecElementType::kD, element_index, id()); }
//! Clones and casts the register to a 128-bit V.H2[elementIndex] register.
//! Clones and casts the register to a 128-bit V.H2[element_index] register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec h2(uint32_t elementIndex) const noexcept { return make_v128_with_element_index(VecElementType::kH2, elementIndex, id()); }
ASMJIT_INLINE_CONSTEXPR Vec h2(uint32_t element_index) const noexcept { return make_v128_with_element_index(VecElementType::kH2, element_index, id()); }
//! Clones and casts the register to a 128-bit V.B4[elementIndex] register.
//! Clones and casts the register to a 128-bit V.B4[element_index] register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec b4(uint32_t elementIndex) const noexcept { return make_v128_with_element_index(VecElementType::kB4, elementIndex, id()); }
ASMJIT_INLINE_CONSTEXPR Vec b4(uint32_t element_index) const noexcept { return make_v128_with_element_index(VecElementType::kB4, element_index, id()); }
//! Clones and casts the register to V.8B.
[[nodiscard]]
@@ -396,24 +396,24 @@ public:
//! Returns whether the vector register has associated a vector element type.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR bool hasElementType() const noexcept {
return _signature.hasField<kSignatureRegElementTypeMask>();
ASMJIT_INLINE_CONSTEXPR bool has_element_type() const noexcept {
return _signature.has_field<kSignatureRegElementTypeMask>();
}
//! Returns vector element type of the register.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR VecElementType elementType() const noexcept {
return VecElementType(_signature.getField<kSignatureRegElementTypeMask>());
ASMJIT_INLINE_CONSTEXPR VecElementType element_type() const noexcept {
return VecElementType(_signature.get_field<kSignatureRegElementTypeMask>());
}
//! Sets vector element type of the register to `elementType`.
ASMJIT_INLINE_CONSTEXPR void setElementType(VecElementType elementType) noexcept {
_signature.setField<kSignatureRegElementTypeMask>(uint32_t(elementType));
//! Sets vector element type of the register to `element_type`.
ASMJIT_INLINE_CONSTEXPR void set_element_type(VecElementType element_type) noexcept {
_signature.set_field<kSignatureRegElementTypeMask>(uint32_t(element_type));
}
//! Resets vector element type to none.
ASMJIT_INLINE_CONSTEXPR void resetElementType() noexcept {
_signature.setField<kSignatureRegElementTypeMask>(0);
ASMJIT_INLINE_CONSTEXPR void reset_element_type() noexcept {
_signature.set_field<kSignatureRegElementTypeMask>(0);
}
//! \}
@@ -422,30 +422,30 @@ public:
//! \{
//! Returns whether the register has element index (it's an element index access).
ASMJIT_INLINE_CONSTEXPR bool hasElementIndex() const noexcept {
return _signature.hasField<kSignatureRegElementFlagMask>();
ASMJIT_INLINE_CONSTEXPR bool has_element_index() const noexcept {
return _signature.has_field<kSignatureRegElementFlagMask>();
}
//! Returns element index of the register.
ASMJIT_INLINE_CONSTEXPR uint32_t elementIndex() const noexcept {
return _signature.getField<kSignatureRegElementIndexMask>();
ASMJIT_INLINE_CONSTEXPR uint32_t element_index() const noexcept {
return _signature.get_field<kSignatureRegElementIndexMask>();
}
//! Sets element index of the register to `elementType`.
ASMJIT_INLINE_CONSTEXPR void setElementIndex(uint32_t elementIndex) noexcept {
//! Sets element index of the register to `element_type`.
ASMJIT_INLINE_CONSTEXPR void set_element_index(uint32_t element_index) noexcept {
_signature |= kSignatureRegElementFlagMask;
_signature.setField<kSignatureRegElementIndexMask>(elementIndex);
_signature.set_field<kSignatureRegElementIndexMask>(element_index);
}
//! Resets element index of the register.
ASMJIT_INLINE_CONSTEXPR void resetElementIndex() noexcept {
ASMJIT_INLINE_CONSTEXPR void reset_element_index() noexcept {
_signature &= ~(kSignatureRegElementFlagMask | kSignatureRegElementIndexMask);
}
//! Clones a vector register with element access enabled at the given `elementIndex`.
//! Clones a vector register with element access enabled at the given `element_index`.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR Vec at(uint32_t elementIndex) const noexcept {
return Vec((signature() & ~kSignatureRegElementIndexMask) | (elementIndex << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id());
ASMJIT_INLINE_CONSTEXPR Vec at(uint32_t element_index) const noexcept {
return Vec((signature() & ~kSignatureRegElementIndexMask) | (element_index << kSignatureRegElementIndexShift) | kSignatureRegElementFlagMask, id());
}
//! \}
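As a quick illustration of how these element accessors compose (a minimal sketch; the `a64` namespace and the predefined `v0` register are assumed from AsmJit's public headers):

```cpp
// Hypothetical usage: element access on a 128-bit vector register.
a64::Vec lane = a64::v0.d(1);            // V0.D[1] - the 64-bit element at index 1.
a64::Vec same = lane.at(0);              // Same element type (D), index changed to 0.
bool indexed = lane.has_element_index(); // true - this is an element access.
```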
@@ -486,35 +486,35 @@ public:
ASMJIT_INLINE_NODEBUG explicit Mem(Globals::NoInit_) noexcept
: BaseMem(Globals::NoInit) {}
ASMJIT_INLINE_CONSTEXPR Mem(const Signature& signature, uint32_t baseId, uint32_t indexId, int32_t offset) noexcept
: BaseMem(signature, baseId, indexId, offset) {}
ASMJIT_INLINE_CONSTEXPR Mem(const Signature& signature, uint32_t base_id, uint32_t index_id, int32_t offset) noexcept
: BaseMem(signature, base_id, index_id, offset) {}
ASMJIT_INLINE_CONSTEXPR explicit Mem(const Label& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(RegType::kLabelTag) |
: BaseMem(Signature::from_op_type(OperandType::kMem) |
Signature::from_mem_base_type(RegType::kLabelTag) |
signature, base.id(), 0, off) {}
ASMJIT_INLINE_CONSTEXPR explicit Mem(const Reg& base, int32_t off = 0, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.regType()) |
: BaseMem(Signature::from_op_type(OperandType::kMem) |
Signature::from_mem_base_type(base.reg_type()) |
signature, base.id(), 0, off) {}
ASMJIT_INLINE_CONSTEXPR Mem(const Reg& base, const Reg& index, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.regType()) |
Signature::fromMemIndexType(index.regType()) |
: BaseMem(Signature::from_op_type(OperandType::kMem) |
Signature::from_mem_base_type(base.reg_type()) |
Signature::from_mem_index_type(index.reg_type()) |
signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_CONSTEXPR Mem(const Reg& base, const Reg& index, const Shift& shift, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
Signature::fromMemBaseType(base.regType()) |
Signature::fromMemIndexType(index.regType()) |
Signature::fromValue<kSignatureMemShiftOpMask>(uint32_t(shift.op())) |
Signature::fromValue<kSignatureMemShiftValueMask>(shift.value()) |
: BaseMem(Signature::from_op_type(OperandType::kMem) |
Signature::from_mem_base_type(base.reg_type()) |
Signature::from_mem_index_type(index.reg_type()) |
Signature::from_value<kSignatureMemShiftOpMask>(uint32_t(shift.op())) |
Signature::from_value<kSignatureMemShiftValueMask>(shift.value()) |
signature, base.id(), index.id(), 0) {}
ASMJIT_INLINE_CONSTEXPR explicit Mem(uint64_t base, Signature signature = Signature{0}) noexcept
: BaseMem(Signature::fromOpType(OperandType::kMem) |
: BaseMem(Signature::from_op_type(OperandType::kMem) |
signature, uint32_t(base >> 32), 0, int32_t(uint32_t(base & 0xFFFFFFFFu))) {}
//! \}
@@ -523,7 +523,7 @@ public:
//! \{
ASMJIT_INLINE_CONSTEXPR Mem& operator=(const Mem& other) noexcept {
copyFrom(other);
copy_from(other);
return *this;
}
@@ -536,39 +536,39 @@ public:
ASMJIT_INLINE_CONSTEXPR Mem clone() const noexcept { return Mem(*this); }
//! Gets new memory operand adjusted by `off`.
ASMJIT_INLINE_CONSTEXPR Mem cloneAdjusted(int64_t off) const noexcept {
ASMJIT_INLINE_CONSTEXPR Mem clone_adjusted(int64_t off) const noexcept {
Mem result(*this);
result.addOffset(off);
result.add_offset(off);
return result;
}
//! Clones the memory operand and makes it pre-indexed.
ASMJIT_INLINE_CONSTEXPR Mem pre() const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPreIndex);
result.set_offset_mode(OffsetMode::kPreIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off`, and makes it pre-indexed.
ASMJIT_INLINE_CONSTEXPR Mem pre(int64_t off) const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPreIndex);
result.addOffset(off);
result.set_offset_mode(OffsetMode::kPreIndex);
result.add_offset(off);
return result;
}
//! Clones the memory operand and makes it post-indexed.
ASMJIT_INLINE_CONSTEXPR Mem post() const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPostIndex);
result.set_offset_mode(OffsetMode::kPostIndex);
return result;
}
//! Clones the memory operand, applies a given offset `off`, and makes it post-indexed.
ASMJIT_INLINE_CONSTEXPR Mem post(int64_t off) const noexcept {
Mem result(*this);
result.setOffsetMode(OffsetMode::kPostIndex);
result.addOffset(off);
result.set_offset_mode(OffsetMode::kPostIndex);
result.add_offset(off);
return result;
}
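The clone-style API above makes it easy to derive write-back forms from a plain memory operand. A minimal sketch (assuming the `a64` namespace and the `ptr()` factory shown later in this header):

```cpp
// Hypothetical usage: deriving pre/post-index operands from [x0].
a64::Mem m = a64::ptr(a64::x0);      // [x0]
a64::Mem pre = m.pre(16);            // [x0, #16]!  (pre-index, write-back)
a64::Mem post = m.post(16);          // [x0], #16   (post-index, write-back)
```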
@@ -577,26 +577,26 @@ public:
//! \name Base & Index
//! \{
//! Converts memory `baseType` and `baseId` to `arm::Reg` instance.
//! Converts memory `base_type` and `base_id` to `arm::Reg` instance.
//!
//! The memory must have a valid base register, otherwise the result will be wrong.
ASMJIT_INLINE_NODEBUG Reg baseReg() const noexcept { return Reg::fromTypeAndId(baseType(), baseId()); }
ASMJIT_INLINE_NODEBUG Reg base_reg() const noexcept { return Reg::from_type_and_id(base_type(), base_id()); }
//! Converts memory `indexType` and `indexId` to `arm::Reg` instance.
//! Converts memory `index_type` and `index_id` to `arm::Reg` instance.
//!
//! The memory must have a valid index register, otherwise the result will be wrong.
ASMJIT_INLINE_NODEBUG Reg indexReg() const noexcept { return Reg::fromTypeAndId(indexType(), indexId()); }
ASMJIT_INLINE_NODEBUG Reg index_reg() const noexcept { return Reg::from_type_and_id(index_type(), index_id()); }
using BaseMem::setIndex;
using BaseMem::set_index;
ASMJIT_INLINE_CONSTEXPR void setIndex(const Reg& index, uint32_t shift) noexcept {
setIndex(index);
setShift(shift);
ASMJIT_INLINE_CONSTEXPR void set_index(const Reg& index, uint32_t shift) noexcept {
set_index(index);
set_shift(shift);
}
ASMJIT_INLINE_CONSTEXPR void setIndex(const Reg& index, Shift shift) noexcept {
setIndex(index);
setShift(shift);
ASMJIT_INLINE_CONSTEXPR void set_index(const Reg& index, Shift shift) noexcept {
set_index(index);
set_shift(shift);
}
//! \}
@@ -605,48 +605,48 @@ public:
//! \{
//! Gets offset mode.
ASMJIT_INLINE_CONSTEXPR OffsetMode offsetMode() const noexcept { return OffsetMode(_signature.getField<kSignatureMemOffsetModeMask>()); }
ASMJIT_INLINE_CONSTEXPR OffsetMode offset_mode() const noexcept { return OffsetMode(_signature.get_field<kSignatureMemOffsetModeMask>()); }
//! Sets offset mode to `mode`.
ASMJIT_INLINE_CONSTEXPR void setOffsetMode(OffsetMode mode) noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(mode)); }
ASMJIT_INLINE_CONSTEXPR void set_offset_mode(OffsetMode mode) noexcept { _signature.set_field<kSignatureMemOffsetModeMask>(uint32_t(mode)); }
//! Resets offset mode to default (fixed offset, without write-back).
ASMJIT_INLINE_CONSTEXPR void resetOffsetMode() noexcept { _signature.setField<kSignatureMemOffsetModeMask>(uint32_t(OffsetMode::kFixed)); }
ASMJIT_INLINE_CONSTEXPR void reset_offset_mode() noexcept { _signature.set_field<kSignatureMemOffsetModeMask>(uint32_t(OffsetMode::kFixed)); }
//! Tests whether the current memory offset mode is fixed (see \ref OffsetMode::kFixed).
ASMJIT_INLINE_CONSTEXPR bool isFixedOffset() const noexcept { return offsetMode() == OffsetMode::kFixed; }
//! Tests whether the current memory offset mode is fixed (see \ref arm::OffsetMode::kFixed).
ASMJIT_INLINE_CONSTEXPR bool is_fixed_offset() const noexcept { return offset_mode() == OffsetMode::kFixed; }
//! Tests whether the current memory offset mode is either pre-index or post-index (write-back is used).
ASMJIT_INLINE_CONSTEXPR bool isPreOrPost() const noexcept { return offsetMode() != OffsetMode::kFixed; }
ASMJIT_INLINE_CONSTEXPR bool is_pre_or_post() const noexcept { return offset_mode() != OffsetMode::kFixed; }
//! Tests whether the current memory offset mode is pre-index (write-back is used).
ASMJIT_INLINE_CONSTEXPR bool isPreIndex() const noexcept { return offsetMode() == OffsetMode::kPreIndex; }
ASMJIT_INLINE_CONSTEXPR bool is_pre_index() const noexcept { return offset_mode() == OffsetMode::kPreIndex; }
//! Tests whether the current memory offset mode is post-index (write-back is used).
ASMJIT_INLINE_CONSTEXPR bool isPostIndex() const noexcept { return offsetMode() == OffsetMode::kPostIndex; }
ASMJIT_INLINE_CONSTEXPR bool is_post_index() const noexcept { return offset_mode() == OffsetMode::kPostIndex; }
//! Sets offset mode of this memory operand to pre-index (write-back is used).
ASMJIT_INLINE_CONSTEXPR void makePreIndex() noexcept { setOffsetMode(OffsetMode::kPreIndex); }
ASMJIT_INLINE_CONSTEXPR void make_pre_index() noexcept { set_offset_mode(OffsetMode::kPreIndex); }
//! Sets offset mode of this memory operand to post-index (write-back is used).
ASMJIT_INLINE_CONSTEXPR void makePostIndex() noexcept { setOffsetMode(OffsetMode::kPostIndex); }
ASMJIT_INLINE_CONSTEXPR void make_post_index() noexcept { set_offset_mode(OffsetMode::kPostIndex); }
//! Gets shift operation that is used by index register.
ASMJIT_INLINE_CONSTEXPR ShiftOp shiftOp() const noexcept { return ShiftOp(_signature.getField<kSignatureMemShiftOpMask>()); }
ASMJIT_INLINE_CONSTEXPR ShiftOp shift_op() const noexcept { return ShiftOp(_signature.get_field<kSignatureMemShiftOpMask>()); }
//! Sets shift operation that is used by index register.
ASMJIT_INLINE_CONSTEXPR void setShiftOp(ShiftOp sop) noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(sop)); }
ASMJIT_INLINE_CONSTEXPR void set_shift_op(ShiftOp sop) noexcept { _signature.set_field<kSignatureMemShiftOpMask>(uint32_t(sop)); }
//! Resets shift operation that is used by index register to LSL (default value).
ASMJIT_INLINE_CONSTEXPR void resetShiftOp() noexcept { _signature.setField<kSignatureMemShiftOpMask>(uint32_t(ShiftOp::kLSL)); }
ASMJIT_INLINE_CONSTEXPR void reset_shift_op() noexcept { _signature.set_field<kSignatureMemShiftOpMask>(uint32_t(ShiftOp::kLSL)); }
//! Gets whether the memory operand has shift (aka scale) constant.
ASMJIT_INLINE_CONSTEXPR bool hasShift() const noexcept { return _signature.hasField<kSignatureMemShiftValueMask>(); }
ASMJIT_INLINE_CONSTEXPR bool has_shift() const noexcept { return _signature.has_field<kSignatureMemShiftValueMask>(); }
//! Gets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_CONSTEXPR uint32_t shift() const noexcept { return _signature.getField<kSignatureMemShiftValueMask>(); }
ASMJIT_INLINE_CONSTEXPR uint32_t shift() const noexcept { return _signature.get_field<kSignatureMemShiftValueMask>(); }
//! Sets the memory operand's shift (aka scale) constant.
ASMJIT_INLINE_CONSTEXPR void setShift(uint32_t shift) noexcept { _signature.setField<kSignatureMemShiftValueMask>(shift); }
ASMJIT_INLINE_CONSTEXPR void set_shift(uint32_t shift) noexcept { _signature.set_field<kSignatureMemShiftValueMask>(shift); }
//! Sets the memory operand's shift and shift operation.
ASMJIT_INLINE_CONSTEXPR void setShift(Shift shift) noexcept {
_signature.setField<kSignatureMemShiftOpMask>(uint32_t(shift.op()));
_signature.setField<kSignatureMemShiftValueMask>(shift.value());
ASMJIT_INLINE_CONSTEXPR void set_shift(Shift shift) noexcept {
_signature.set_field<kSignatureMemShiftOpMask>(uint32_t(shift.op()));
_signature.set_field<kSignatureMemShiftValueMask>(shift.value());
}
//! Resets the memory operand's shift (aka scale) constant to zero.
ASMJIT_INLINE_CONSTEXPR void resetShift() noexcept { _signature.setField<kSignatureMemShiftValueMask>(0); }
ASMJIT_INLINE_CONSTEXPR void reset_shift() noexcept { _signature.set_field<kSignatureMemShiftValueMask>(0); }
//! \}
};
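A short sketch of the shift accessors in practice (the `Shift(ShiftOp, value)` construction is an assumption based on how `Shift` is used elsewhere in this header):

```cpp
// Hypothetical usage: scaling an index register by 8 (LSL #3).
a64::Mem m = a64::ptr(a64::x0, a64::x1);        // [x0, x1]
m.set_shift(a64::Shift(a64::ShiftOp::kLSL, 3)); // [x0, x1, LSL #3]
uint32_t s = m.shift();                         // 3
```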
@@ -1035,13 +1035,13 @@ static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, int32_t offset = 0) noexc
//! Creates `[base, offset]!` memory operand (pre-index mode) (AArch64).
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
return Mem(base, offset, OperandSignature::from_value<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
}
//! Creates `[base], offset` memory operand (post-index mode) (AArch64).
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, int32_t offset = 0) noexcept {
return Mem(base, offset, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
return Mem(base, offset, OperandSignature::from_value<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
}
//! Creates `[base, index]` memory operand (AArch64).
@@ -1053,13 +1053,13 @@ static ASMJIT_INLINE_CONSTEXPR Mem ptr(const Gp& base, const Gp& index) noexcept
//! Creates `[base, index]!` memory operand (pre-index mode) (AArch64).
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_pre(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
return Mem(base, index, OperandSignature::from_value<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPreIndex));
}
//! Creates `[base], index` memory operand (post-index mode) (AArch64).
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR Mem ptr_post(const Gp& base, const Gp& index) noexcept {
return Mem(base, index, OperandSignature::fromValue<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
return Mem(base, index, OperandSignature::from_value<Mem::kSignatureMemOffsetModeMask>(OffsetMode::kPostIndex));
}
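Putting the factories together with an emitter (a minimal sketch; `a64::Assembler` and the `ldr`/`str` mnemonics are assumed from AsmJit's AArch64 backend):

```cpp
// Hypothetical usage: write-back addressing via ptr_pre() / ptr_post().
// 'code' is an initialized CodeHolder (setup not shown).
a64::Assembler a(&code);
a.ldr(a64::x1, a64::ptr_pre(a64::x0, 16));  // ldr x1, [x0, #16]!
a.str(a64::x1, a64::ptr_post(a64::x2, 8));  // str x1, [x2], #8
```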
//! Creates `[base, index, SHIFT_OP #shift]` memory operand (AArch64).

File diff suppressed because it is too large


@@ -10,7 +10,8 @@
#ifndef ASMJIT_NO_COMPILER
#include "../core/compiler.h"
#include "../core/rabuilders_p.h"
#include "../core/racfgblock_p.h"
#include "../core/racfgbuilder_p.h"
#include "../core/rapass_p.h"
#include "../arm/a64assembler.h"
#include "../arm/a64compiler.h"
@@ -34,14 +35,14 @@ public:
//! \name Members
//! \{
EmitHelper _emitHelper;
EmitHelper _emit_helper;
//! \}
//! \name Construction & Destruction
//! \{
ARMRAPass() noexcept;
ARMRAPass(BaseCompiler& cc) noexcept;
~ARMRAPass() noexcept override;
//! \}
@@ -51,54 +52,54 @@ public:
//! Returns the compiler casted to `arm::Compiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Compiler* cc() const noexcept { return static_cast<Compiler*>(_cb); }
ASMJIT_INLINE_NODEBUG Compiler& cc() const noexcept { return static_cast<Compiler&>(_cb); }
//! Returns emit helper.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitHelper* emitHelper() noexcept { return &_emitHelper; }
ASMJIT_INLINE_NODEBUG EmitHelper* emit_helper() noexcept { return &_emit_helper; }
//! \}
//! \name Events
//! \{
void onInit() noexcept override;
void onDone() noexcept override;
void on_init() noexcept override;
void on_done() noexcept override;
//! \}
//! \name CFG
//! \{
Error buildCFG() noexcept override;
Error build_cfg_nodes() noexcept override;
//! \}
//! \name Rewrite
//! \{
Error _rewrite(BaseNode* first, BaseNode* stop) noexcept override;
Error rewrite() noexcept override;
//! \}
//! \name Prolog & Epilog
//! \{
Error updateStackFrame() noexcept override;
Error update_stack_frame() noexcept override;
//! \}
//! \name Emit Helpers
//! \{
Error emitMove(uint32_t workId, uint32_t dstPhysId, uint32_t srcPhysId) noexcept override;
Error emitSwap(uint32_t aWorkId, uint32_t aPhysId, uint32_t bWorkId, uint32_t bPhysId) noexcept override;
Error emit_move(RAWorkReg* work_reg, uint32_t dst_phys_id, uint32_t src_phys_id) noexcept override;
Error emit_swap(RAWorkReg* a_reg, uint32_t a_phys_id, RAWorkReg* b_reg, uint32_t b_phys_id) noexcept override;
Error emitLoad(uint32_t workId, uint32_t dstPhysId) noexcept override;
Error emitSave(uint32_t workId, uint32_t srcPhysId) noexcept override;
Error emit_load(RAWorkReg* work_reg, uint32_t dst_phys_id) noexcept override;
Error emit_save(RAWorkReg* work_reg, uint32_t src_phys_id) noexcept override;
Error emitJump(const Label& label) noexcept override;
Error emitPreCall(InvokeNode* invokeNode) noexcept override;
Error emit_jump(const Label& label) noexcept override;
Error emit_pre_call(InvokeNode* invoke_node) noexcept override;
//! \}
};


@@ -23,9 +23,9 @@ ASMJIT_BEGIN_SUB_NAMESPACE(arm)
// arm::FormatterInternal - Format Feature
// =======================================
Error FormatterInternal::formatFeature(String& sb, uint32_t featureId) noexcept {
// @EnumStringBegin{"enum": "CpuFeatures::ARM", "output": "sFeature", "strip": "k"}@
static const char sFeatureString[] =
Error FormatterInternal::format_feature(String& sb, uint32_t feature_id) noexcept {
// @EnumStringBegin{"enum": "CpuFeatures::ARM", "output": "feature_string", "strip": "k"}@
static const char feature_string_data[] =
"None\0"
"ARMv6\0"
"ARMv7\0"
@@ -253,7 +253,7 @@ Error FormatterInternal::formatFeature(String& sb, uint32_t featureId) noexcept
"XS\0"
"<Unknown>\0";
static const uint16_t sFeatureIndex[] = {
static const uint16_t feature_string_index[] = {
0, 5, 11, 17, 24, 30, 38, 43, 49, 53, 57, 61, 66, 73, 79, 85, 90, 95, 99,
103, 109, 113, 120, 125, 131, 145, 149, 154, 160, 166, 171, 176, 183, 188,
193, 197, 201, 209, 213, 218, 223, 229, 235, 239, 245, 250, 257, 264, 272,
@@ -273,25 +273,25 @@ Error FormatterInternal::formatFeature(String& sb, uint32_t featureId) noexcept
};
// @EnumStringEnd@
return sb.append(sFeatureString + sFeatureIndex[Support::min<uint32_t>(featureId, uint32_t(CpuFeatures::ARM::kMaxValue) + 1)]);
return sb.append(feature_string_data + feature_string_index[Support::min(feature_id, uint32_t(CpuFeatures::ARM::kMaxValue) + 1u)]);
}
// arm::FormatterInternal - Format Constants
// =========================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatCondCode(String& sb, CondCode cc) noexcept {
static const char condCodeData[] =
ASMJIT_FAVOR_SIZE Error FormatterInternal::format_cond_code(String& sb, CondCode cc) noexcept {
static const char cond_code_string_data[] =
"al\0" "na\0"
"eq\0" "ne\0"
"hs\0" "lo\0" "mi\0" "pl\0" "vs\0" "vc\0"
"hi\0" "ls\0" "ge\0" "lt\0" "gt\0" "le\0"
"<Unknown>";
return sb.append(condCodeData + Support::min<uint32_t>(uint32_t(cc), 16u) * 3);
return sb.append(cond_code_string_data + Support::min<uint32_t>(uint32_t(cc), 16u) * 3);
}
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatShiftOp(String& sb, ShiftOp shiftOp) noexcept {
ASMJIT_FAVOR_SIZE Error FormatterInternal::format_shift_op(String& sb, ShiftOp shift_op) noexcept {
const char* str = nullptr;
switch (shiftOp) {
switch (shift_op) {
case ShiftOp::kLSL: str = "lsl"; break;
case ShiftOp::kLSR: str = "lsr"; break;
case ShiftOp::kASR: str = "asr"; break;
@@ -316,12 +316,12 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatShiftOp(String& sb, ShiftOp shi
struct FormatElementData {
char letter;
uint8_t elementCount;
uint8_t onlyIndex;
uint8_t element_count;
uint8_t only_index;
uint8_t reserved;
};
static constexpr FormatElementData formatElementDataTable[9] = {
static constexpr FormatElementData format_element_data_table[9] = {
{ '?' , 0 , 0, 0 }, // None
{ 'b' , 16, 0, 0 }, // bX or b[index]
{ 'h' , 8 , 0, 0 }, // hX or h[index]
@@ -333,63 +333,63 @@ static constexpr FormatElementData formatElementDataTable[9] = {
{ '?' , 0 , 0, 0 } // invalid (never stored in Operand, bug...)
};
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
ASMJIT_FAVOR_SIZE Error FormatterInternal::format_register(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rId,
uint32_t elementType,
uint32_t elementIndex) noexcept {
RegType reg_type,
uint32_t reg_id,
uint32_t element_type,
uint32_t element_index) noexcept {
DebugUtils::unused(flags);
DebugUtils::unused(arch);
Support::maybe_unused(flags);
Support::maybe_unused(arch);
static const char bhsdq[] = "bhsdq";
bool virtRegFormatted = false;
bool is_virt_reg_formatted = false;
#ifndef ASMJIT_NO_COMPILER
if (Operand::isVirtId(rId)) {
if (emitter && emitter->isCompiler()) {
if (Operand::is_virt_id(reg_id)) {
if (emitter && emitter->is_compiler()) {
const BaseCompiler* cc = static_cast<const BaseCompiler*>(emitter);
if (cc->isVirtIdValid(rId)) {
VirtReg* vReg = cc->virtRegById(rId);
ASMJIT_ASSERT(vReg != nullptr);
if (cc->is_virt_id_valid(reg_id)) {
VirtReg* virt_reg = cc->virt_reg_by_id(reg_id);
ASMJIT_ASSERT(virt_reg != nullptr);
ASMJIT_PROPAGATE(Formatter::formatVirtRegName(sb, vReg));
virtRegFormatted = true;
ASMJIT_PROPAGATE(Formatter::format_virt_reg_name(sb, virt_reg));
is_virt_reg_formatted = true;
}
}
}
#else
DebugUtils::unused(emitter, flags);
Support::maybe_unused(emitter, flags);
#endif
if (!virtRegFormatted) {
if (!is_virt_reg_formatted) {
char letter = '\0';
switch (regType) {
switch (reg_type) {
case RegType::kVec8:
case RegType::kVec16:
case RegType::kVec32:
case RegType::kVec64:
case RegType::kVec128:
letter = bhsdq[uint32_t(regType) - uint32_t(RegType::kVec8)];
if (elementType) {
letter = bhsdq[uint32_t(reg_type) - uint32_t(RegType::kVec8)];
if (element_type) {
letter = 'v';
}
break;
case RegType::kGp32:
if (Environment::is64Bit(arch)) {
if (Environment::is_64bit(arch)) {
letter = 'w';
if (rId == a64::Gp::kIdZr) {
if (reg_id == a64::Gp::kIdZr) {
return sb.append("wzr", 3);
}
if (rId == a64::Gp::kIdSp) {
if (reg_id == a64::Gp::kIdSp) {
return sb.append("wsp", 3);
}
}
@@ -399,12 +399,12 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
break;
case RegType::kGp64:
if (Environment::is64Bit(arch)) {
if (rId == a64::Gp::kIdZr) {
if (Environment::is_64bit(arch)) {
if (reg_id == a64::Gp::kIdZr) {
return sb.append("xzr", 3);
}
if (rId == a64::Gp::kIdSp) {
if (reg_id == a64::Gp::kIdSp) {
return sb.append("sp", 2);
}
@@ -416,121 +416,121 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegister(
[[fallthrough]];
default:
ASMJIT_PROPAGATE(sb.appendFormat("<Reg-%u>?%u", uint32_t(regType), rId));
ASMJIT_PROPAGATE(sb.append_format("<Reg-%u>?%u", uint32_t(reg_type), reg_id));
break;
}
if (letter)
ASMJIT_PROPAGATE(sb.appendFormat("%c%u", letter, rId));
ASMJIT_PROPAGATE(sb.append_format("%c%u", letter, reg_id));
}
constexpr uint32_t kElementTypeCount = uint32_t(a64::VecElementType::kMaxValue) + 1;
if (elementType) {
elementType = Support::min(elementType, kElementTypeCount);
if (element_type) {
element_type = Support::min(element_type, kElementTypeCount);
FormatElementData elementData = formatElementDataTable[elementType];
uint32_t elementCount = elementData.elementCount;
FormatElementData element_data = format_element_data_table[element_type];
uint32_t element_count = element_data.element_count;
if (regType == RegType::kVec64) {
elementCount /= 2u;
if (reg_type == RegType::kVec64) {
element_count /= 2u;
}
ASMJIT_PROPAGATE(sb.append('.'));
if (elementCount) {
ASMJIT_PROPAGATE(sb.appendUInt(elementCount));
if (element_count) {
ASMJIT_PROPAGATE(sb.append_uint(element_count));
}
ASMJIT_PROPAGATE(sb.append(elementData.letter));
ASMJIT_PROPAGATE(sb.append(element_data.letter));
}
if (elementIndex != 0xFFFFFFFFu) {
ASMJIT_PROPAGATE(sb.appendFormat("[%u]", elementIndex));
if (element_index != 0xFFFFFFFFu) {
ASMJIT_PROPAGATE(sb.append_format("[%u]", element_index));
}
return kErrorOk;
return Error::kOk;
}
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatRegisterList(
ASMJIT_FAVOR_SIZE Error FormatterInternal::format_register_list(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rMask) noexcept {
RegType reg_type,
uint32_t reg_mask) noexcept {
bool first = true;
ASMJIT_PROPAGATE(sb.append('{'));
while (rMask != 0u) {
uint32_t start = Support::ctz(rMask);
while (reg_mask != 0u) {
uint32_t start = Support::ctz(reg_mask);
uint32_t count = 0u;
uint32_t mask = 1u << start;
do {
rMask &= ~mask;
reg_mask &= ~mask;
mask <<= 1u;
count++;
} while (rMask & mask);
} while (reg_mask & mask);
if (!first) {
ASMJIT_PROPAGATE(sb.append(", "));
}
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start, 0, 0xFFFFFFFFu));
ASMJIT_PROPAGATE(format_register(sb, flags, emitter, arch, reg_type, start, 0, 0xFFFFFFFFu));
if (count >= 2u) {
ASMJIT_PROPAGATE(sb.append('-'));
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, regType, start + count - 1, 0, 0xFFFFFFFFu));
ASMJIT_PROPAGATE(format_register(sb, flags, emitter, arch, reg_type, start + count - 1, 0, 0xFFFFFFFFu));
}
first = false;
}
ASMJIT_PROPAGATE(sb.append('}'));
return kErrorOk;
return Error::kOk;
}
// a64::FormatterInternal - Format Operand
// =======================================
ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
ASMJIT_FAVOR_SIZE Error FormatterInternal::format_operand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept {
if (op.isReg()) {
if (op.is_reg()) {
const Reg& reg = op.as<Reg>();
uint32_t elementType = op._signature.getField<a64::Vec::kSignatureRegElementTypeMask>();
uint32_t elementIndex = op.as<a64::Vec>().elementIndex();
uint32_t element_type = op._signature.get_field<a64::Vec::kSignatureRegElementTypeMask>();
uint32_t element_index = op.as<a64::Vec>().element_index();
if (!op.as<a64::Vec>().hasElementIndex()) {
elementIndex = 0xFFFFFFFFu;
if (!op.as<a64::Vec>().has_element_index()) {
element_index = 0xFFFFFFFFu;
}
return formatRegister(sb, flags, emitter, arch, reg.regType(), reg.id(), elementType, elementIndex);
return format_register(sb, flags, emitter, arch, reg.reg_type(), reg.id(), element_type, element_index);
}
if (op.isMem()) {
if (op.is_mem()) {
const a64::Mem& m = op.as<a64::Mem>();
ASMJIT_PROPAGATE(sb.append('['));
if (m.hasBase()) {
if (m.hasBaseLabel()) {
ASMJIT_PROPAGATE(Formatter::formatLabel(sb, flags, emitter, m.baseId()));
if (m.has_base()) {
if (m.has_base_label()) {
ASMJIT_PROPAGATE(Formatter::format_label(sb, flags, emitter, m.base_id()));
}
else {
FormatFlags modifiedFlags = flags;
if (m.isRegHome()) {
FormatFlags modified_flags = flags;
if (m.is_reg_home()) {
ASMJIT_PROPAGATE(sb.append('&'));
modifiedFlags &= ~FormatFlags::kRegCasts;
modified_flags &= ~FormatFlags::kRegCasts;
}
ASMJIT_PROPAGATE(formatRegister(sb, modifiedFlags, emitter, arch, m.baseType(), m.baseId()));
ASMJIT_PROPAGATE(format_register(sb, modified_flags, emitter, arch, m.base_type(), m.base_id()));
}
}
else {
// ARM really requires a base register.
if (m.hasIndex() || m.hasOffset()) {
if (m.has_index() || m.has_offset()) {
ASMJIT_PROPAGATE(sb.append("<None>"));
}
}
@@ -538,15 +538,15 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
// The post index makes it look like there was another operand, but it's
// still part of AsmJit's `arm::Mem` operand so it's consistent with
// other architectures.
if (m.isPostIndex())
if (m.is_post_index())
ASMJIT_PROPAGATE(sb.append(']'));
if (m.hasIndex()) {
if (m.has_index()) {
ASMJIT_PROPAGATE(sb.append(", "));
ASMJIT_PROPAGATE(formatRegister(sb, flags, emitter, arch, m.indexType(), m.indexId()));
ASMJIT_PROPAGATE(format_register(sb, flags, emitter, arch, m.index_type(), m.index_id()));
}
if (m.hasOffset()) {
if (m.has_offset()) {
ASMJIT_PROPAGATE(sb.append(", "));
int64_t off = int64_t(m.offset());
@@ -557,59 +557,59 @@ ASMJIT_FAVOR_SIZE Error FormatterInternal::formatOperand(
}
if (base == 10) {
ASMJIT_PROPAGATE(sb.appendInt(off, base));
ASMJIT_PROPAGATE(sb.append_int(off, base));
}
else {
ASMJIT_PROPAGATE(sb.append("0x"));
ASMJIT_PROPAGATE(sb.appendUInt(uint64_t(off), base));
ASMJIT_PROPAGATE(sb.append_uint(uint64_t(off), base));
}
}
if (m.hasShift()) {
if (m.has_shift()) {
ASMJIT_PROPAGATE(sb.append(' '));
if (!m.isPreOrPost()) {
ASMJIT_PROPAGATE(formatShiftOp(sb, m.shiftOp()));
if (!m.is_pre_or_post()) {
ASMJIT_PROPAGATE(format_shift_op(sb, m.shift_op()));
}
ASMJIT_PROPAGATE(sb.appendFormat(" %u", m.shift()));
ASMJIT_PROPAGATE(sb.append_format(" %u", m.shift()));
}
if (!m.isPostIndex()) {
if (!m.is_post_index()) {
ASMJIT_PROPAGATE(sb.append(']'));
}
if (m.isPreIndex()) {
if (m.is_pre_index()) {
ASMJIT_PROPAGATE(sb.append('!'));
}
return kErrorOk;
return Error::kOk;
}
if (op.isImm()) {
if (op.is_imm()) {
const Imm& i = op.as<Imm>();
int64_t val = i.value();
uint32_t predicate = i.predicate();
if (predicate) {
ASMJIT_PROPAGATE(formatShiftOp(sb, ShiftOp(predicate)));
ASMJIT_PROPAGATE(format_shift_op(sb, ShiftOp(predicate)));
ASMJIT_PROPAGATE(sb.append(' '));
}
if (Support::test(flags, FormatFlags::kHexImms) && uint64_t(val) > 9) {
ASMJIT_PROPAGATE(sb.append("0x"));
return sb.appendUInt(uint64_t(val), 16);
return sb.append_uint(uint64_t(val), 16);
}
else {
return sb.appendInt(val, 10);
return sb.append_int(val, 10);
}
}
if (op.isLabel()) {
return Formatter::formatLabel(sb, flags, emitter, op.id());
if (op.is_label()) {
return Formatter::format_label(sb, flags, emitter, op.id());
}
if (op.isRegList()) {
const BaseRegList& regList = op.as<BaseRegList>();
return formatRegisterList(sb, flags, emitter, arch, regList.regType(), regList.list());
if (op.is_reg_list()) {
const BaseRegList& reg_list = op.as<BaseRegList>();
return format_register_list(sb, flags, emitter, arch, reg_list.reg_type(), reg_list.list());
}
return sb.append("<None>");


@@ -21,37 +21,37 @@ ASMJIT_BEGIN_SUB_NAMESPACE(arm)
namespace FormatterInternal {
Error ASMJIT_CDECL formatFeature(
Error ASMJIT_CDECL format_feature(
String& sb,
uint32_t featureId) noexcept;
uint32_t feature_id) noexcept;
Error ASMJIT_CDECL formatCondCode(
Error ASMJIT_CDECL format_cond_code(
String& sb,
CondCode cc) noexcept;
Error ASMJIT_CDECL formatShiftOp(
Error ASMJIT_CDECL format_shift_op(
String& sb,
ShiftOp shiftOp) noexcept;
ShiftOp shift_op) noexcept;
Error ASMJIT_CDECL formatRegister(
Error ASMJIT_CDECL format_register(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rId,
uint32_t elementType = 0,
uint32_t elementIndex = 0xFFFFFFFF) noexcept;
RegType reg_type,
uint32_t reg_id,
uint32_t element_type = 0,
uint32_t element_index = 0xFFFFFFFF) noexcept;
Error ASMJIT_CDECL formatRegisterList(
Error ASMJIT_CDECL format_register_list(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t rMask) noexcept;
RegType reg_type,
uint32_t reg_mask) noexcept;
Error ASMJIT_CDECL formatOperand(
Error ASMJIT_CDECL format_operand(
String& sb,
FormatFlags flags,
const BaseEmitter* emitter,


@@ -19,7 +19,8 @@ namespace Utils {
//! Encodes a 12-bit immediate part of opcode that is used by a standard 32-bit ARM encoding.
[[maybe_unused]]
static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexcept {
[[nodiscard]]
static ASMJIT_INLINE bool encode_aarch32_imm(uint64_t imm, Out<uint32_t> imm_out) noexcept {
if (imm & 0xFFFFFFFF00000000u)
return false;
@@ -27,7 +28,7 @@ static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexc
uint32_t r = 0;
if (v <= 0xFFu) {
*encodedImmOut = v;
imm_out = v;
return true;
}
@@ -45,7 +46,7 @@ static inline bool encodeAArch32Imm(uint64_t imm, uint32_t* encodedImmOut) noexc
if (v > 0xFFu)
return false;
*encodedImmOut = v | (r << 7);
imm_out = v | (r << 7);
return true;
}
@@ -74,47 +75,50 @@ struct LogicalImm {
//! +---+--------+--------+------+
//! ```
[[maybe_unused]]
static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noexcept {
[[nodiscard]]
static ASMJIT_INLINE bool encode_logical_imm(uint64_t imm, uint32_t width, Out<LogicalImm> out) noexcept {
// Determine the element width, which must be 2, 4, 8, 16, 32, or 64 bits.
do {
width /= 2;
width /= 2u;
uint64_t mask = (uint64_t(1) << width) - 1u;
if ((imm & mask) != ((imm >> width) & mask)) {
width *= 2;
width *= 2u;
break;
}
} while (width > 2);
} while (width > 2u);
// Patterns of all zeros and all ones are not encodable.
uint64_t lsbMask = Support::lsbMask<uint64_t>(width);
imm &= lsbMask;
uint64_t lsb_mask = Support::lsb_mask<uint64_t>(width);
imm &= lsb_mask;
if (imm == 0 || imm == lsbMask)
if (imm == 0 || imm == lsb_mask) {
return false;
}
// Inspect the pattern and get the most important bit indexes.
//
// oIndex <-+ +-> zIndex
// o_index <-+ +-> z_index
// | |
// |..zeros..|oCount|zCount|..ones..|
// |..zeros..|o_count|z_count|..ones..|
// |000000000|111111|000000|11111111|
uint32_t zIndex = Support::ctz(~imm);
uint64_t zImm = imm ^ ((uint64_t(1) << zIndex) - 1);
uint32_t zCount = (zImm ? Support::ctz(zImm) : width) - zIndex;
uint32_t z_index = Support::ctz(~imm);
uint64_t z_imm = imm ^ ((uint64_t(1) << z_index) - 1);
uint32_t z_count = (z_imm ? Support::ctz(z_imm) : width) - z_index;
uint32_t oIndex = zIndex + zCount;
uint64_t oImm = ~(zImm ^ Support::lsbMask<uint64_t>(oIndex));
uint32_t oCount = (oImm ? Support::ctz(oImm) : width) - (oIndex);
uint32_t o_index = z_index + z_count;
uint64_t o_imm = ~(z_imm ^ Support::lsb_mask<uint64_t>(o_index));
uint32_t o_count = (o_imm ? Support::ctz(o_imm) : width) - (o_index);
// Verify whether the bit-pattern is encodable.
uint64_t mustBeZero = oImm ^ ~Support::lsbMask<uint64_t>(oIndex + oCount);
if (mustBeZero != 0 || (zIndex > 0 && width - (oIndex + oCount) != 0))
uint64_t must_be_zero = o_imm ^ ~Support::lsb_mask<uint64_t>(o_index + o_count);
if (must_be_zero != 0 || (z_index > 0 && width - (o_index + o_count) != 0u)) {
return false;
}
out->n = width == 64;
out->s = (oCount + zIndex - 1) | (Support::neg(width * 2) & 0x3F);
out->r = width - oIndex;
out->s = (o_count + z_index - 1) | (Support::neg(width * 2u) & 0x3Fu);
out->r = width - o_index;
return true;
}
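A usage sketch (the value below is just an example of a repeating bit-pattern; field names follow the `LogicalImm` struct above):

```cpp
// Hypothetical usage: 0x00FF00FF00FF00FF repeats a 16-bit element (0x00FF),
// so it is encodable as a logical immediate for 64-bit operations.
LogicalImm enc;
if (encode_logical_imm(0x00FF00FF00FF00FFu, 64, Out(enc))) {
  // enc.n, enc.s, and enc.r now hold the fields of the N:immr:imms encoding.
}
```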
@@ -122,47 +126,54 @@ static bool encodeLogicalImm(uint64_t imm, uint32_t width, LogicalImm* out) noex
//! width of the operation, and must be either 32 or 64. This function can be used to test whether an immediate
//! value can be used with AND, ANDS, BIC, BICS, EON, EOR, ORN, and ORR instruction.
[[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isLogicalImm(uint64_t imm, uint32_t width) noexcept {
[[nodiscard]]
static ASMJIT_INLINE bool is_logical_imm(uint64_t imm, uint32_t width) noexcept {
LogicalImm dummy;
return encodeLogicalImm(imm, width, &dummy);
return encode_logical_imm(imm, width, Out(dummy));
}
//! Returns true if the given `imm` value is encodable as an immediate with `add` and `sub` instructions on AArch64.
//! These two instructions can encode 12-bit immediate value optionally shifted left by 12 bits.
[[maybe_unused]]
static ASMJIT_INLINE_NODEBUG bool isAddSubImm(uint64_t imm) noexcept {
[[nodiscard]]
static ASMJIT_INLINE bool is_add_sub_imm(uint64_t imm) noexcept {
return imm <= 0xFFFu || (imm & ~uint64_t(0xFFFu << 12)) == 0;
}
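For instance (a small sketch of the two encodable forms):

```cpp
// Hypothetical usage: ADD/SUB accept a 12-bit immediate, optionally LSL #12.
bool a = is_add_sub_imm(0xFFFu);         // true  - fits in 12 bits.
bool b = is_add_sub_imm(0xFFFu << 12);   // true  - 12-bit value shifted by 12.
bool c = is_add_sub_imm(0x1001u);        // false - needs bits from both halves.
```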
//! Returns true if the given `imm` value is a byte mask. Byte mask has each byte part of the value set to either
//! 0x00 or 0xFF. Some ARM instructions accept immediates that form a byte-mask and this function can be used to
//! verify that the immediate is encodable before using the value.
//! 0x00 or 0xFF. Some ARM instructions accept immediate values that form a byte-mask and this function can be used
//! to verify that the immediate is encodable before using the value.
template<typename T>
static ASMJIT_INLINE_NODEBUG bool isByteMaskImm8(const T& imm) noexcept {
constexpr T kMask = T(0x0101010101010101 & Support::allOnes<T>());
[[nodiscard]]
static ASMJIT_INLINE bool is_byte_mask_imm(const T& imm) noexcept {
constexpr T kMask = T(0x0101010101010101 & Support::bit_ones<T>);
return imm == (imm & kMask) * T(255);
}
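A quick sketch of what qualifies as a byte mask:

```cpp
// Hypothetical usage: every byte must be either 0x00 or 0xFF.
bool ok  = is_byte_mask_imm<uint64_t>(0x00FF00FFFF0000FFu); // true
bool bad = is_byte_mask_imm<uint64_t>(0x00FF00FFFF0001FFu); // false (0x01 byte)
```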
// [.......A|B.......|.......C|D.......|.......E|F.......|.......G|H.......]
static ASMJIT_INLINE_NODEBUG uint32_t encodeImm64ByteMaskToImm8(uint64_t imm) noexcept {
[[maybe_unused]]
[[nodiscard]]
static ASMJIT_INLINE uint32_t encode_imm64_byte_mask_to_imm8(uint64_t imm) noexcept {
return uint32_t(((imm >> (7 - 0)) & 0b00000011) | // [.......G|H.......]
((imm >> (23 - 2)) & 0b00001100) | // [.......E|F.......]
((imm >> (39 - 4)) & 0b00110000) | // [.......C|D.......]
((imm >> (55 - 6)) & 0b11000000)); // [.......A|B.......]
}
//! \cond
//! A generic implementation that checjs whether a floating point value can be converted to ARM Imm8.
//! A generic implementation that checks whether a floating point value can be converted to ARM Imm8.
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_INLINE bool isFPImm8Generic(T val) noexcept {
constexpr uint32_t kAllBsMask = Support::lsbMask<uint32_t>(kNumBBits);
constexpr uint32_t kB0Pattern = Support::bitMask(kNumBBits - 1);
[[nodiscard]]
static ASMJIT_INLINE bool is_fp_imm8_generic(T val) noexcept {
constexpr uint32_t kAllBsMask = Support::lsb_mask_const<uint32_t>(kNumBBits);
constexpr uint32_t kB0Pattern = Support::bit_mask<uint32_t>(kNumBBits - 1);
constexpr uint32_t kB1Pattern = kAllBsMask ^ kB0Pattern;
T immZ = val & Support::lsbMask<T>(kNumZeroBits);
uint32_t immB = uint32_t(val >> (kNumZeroBits + kNumCDEFGHBits)) & kAllBsMask;
T imm_z = val & Support::lsb_mask<T>(kNumZeroBits);
uint32_t imm_b = uint32_t(val >> (kNumZeroBits + kNumCDEFGHBits)) & kAllBsMask;
// ImmZ must be all zeros and ImmB must either be B0 or B1 pattern.
return immZ == 0 && (immB == kB0Pattern || immB == kB1Pattern);
return imm_z == 0 && (imm_b == kB0Pattern || imm_b == kB1Pattern);
}
//! \endcond
@@ -174,7 +185,8 @@ static ASMJIT_INLINE bool isFPImm8Generic(T val) noexcept {
//! ```
//! [aBbbcdef|gh000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP16Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 3, 6, 6>(val); }
[[nodiscard]]
static ASMJIT_INLINE bool is_fp16_imm8(uint32_t val) noexcept { return is_fp_imm8_generic<uint32_t, 3, 6, 6>(val); }
//! Returns true if the given single precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
@@ -184,9 +196,12 @@ static ASMJIT_INLINE_NODEBUG bool isFP16Imm8(uint32_t val) noexcept { return isF
//! ```
//! [aBbbbbbc|defgh000|00000000|00000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP32Imm8(uint32_t val) noexcept { return isFPImm8Generic<uint32_t, 6, 6, 19>(val); }
[[nodiscard]]
static ASMJIT_INLINE bool is_fp32_imm8(uint32_t val) noexcept { return is_fp_imm8_generic<uint32_t, 6, 6, 19>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG bool isFP32Imm8(float val) noexcept { return isFP32Imm8(Support::bitCast<uint32_t>(val)); }
[[nodiscard]]
static ASMJIT_INLINE bool is_fp32_imm8(float val) noexcept { return is_fp32_imm8(Support::bit_cast<uint32_t>(val)); }
//! Returns true if the given double precision floating point `val` can be encoded as ARM IMM8 value, which represents
//! a limited set of floating point immediate values, which can be used with FMOV instruction.
@@ -196,13 +211,16 @@ static ASMJIT_INLINE_NODEBUG bool isFP32Imm8(float val) noexcept { return isFP32
//! ```
//! [aBbbbbbb|bbcdefgh|00000000|00000000|00000000|00000000|00000000|00000000]
//! ```
static ASMJIT_INLINE_NODEBUG bool isFP64Imm8(uint64_t val) noexcept { return isFPImm8Generic<uint64_t, 9, 6, 48>(val); }
[[nodiscard]]
static ASMJIT_INLINE bool is_fp64_imm8(uint64_t val) noexcept { return is_fp_imm8_generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG bool isFP64Imm8(double val) noexcept { return isFP64Imm8(Support::bitCast<uint64_t>(val)); }
[[nodiscard]]
static ASMJIT_INLINE bool is_fp64_imm8(double val) noexcept { return is_fp64_imm8(Support::bit_cast<uint64_t>(val)); }
//! \cond
template<typename T, uint32_t kNumBBits, uint32_t kNumCDEFGHBits, uint32_t kNumZeroBits>
static ASMJIT_INLINE_NODEBUG uint32_t encodeFPToImm8Generic(T val) noexcept {
static ASMJIT_INLINE uint32_t encode_fp_to_imm8_generic(T val) noexcept {
uint32_t bits = uint32_t(val >> kNumZeroBits);
return ((bits >> (kNumBBits + kNumCDEFGHBits - 7)) & 0x80u) | (bits & 0x7F);
}
@@ -210,11 +228,14 @@ static ASMJIT_INLINE_NODEBUG uint32_t encodeFPToImm8Generic(T val) noexcept {
//! Encodes a double precision floating point value into IMM8 format.
//!
//! \note This function expects that `isFP64Imm8(val) == true` so it doesn't perform any checks of the value and just
//! \note This function expects that `is_fp64_imm8(val) == true` so it doesn't perform any checks of the value and just
//! rearranges some bits into Imm8 order.
static ASMJIT_INLINE_NODEBUG uint32_t encodeFP64ToImm8(uint64_t val) noexcept { return encodeFPToImm8Generic<uint64_t, 9, 6, 48>(val); }
[[nodiscard]]
static ASMJIT_INLINE uint32_t encode_fp64_to_imm8(uint64_t val) noexcept { return encode_fp_to_imm8_generic<uint64_t, 9, 6, 48>(val); }
//! \overload
static ASMJIT_INLINE_NODEBUG uint32_t encodeFP64ToImm8(double val) noexcept { return encodeFP64ToImm8(Support::bitCast<uint64_t>(val)); }
[[nodiscard]]
static ASMJIT_INLINE uint32_t encode_fp64_to_imm8(double val) noexcept { return encode_fp64_to_imm8(Support::bit_cast<uint64_t>(val)); }
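Typical use pairs the check with the encoder (a minimal sketch; 0.5 is one of the values representable in the FMOV Imm8 set):

```cpp
// Hypothetical usage: encode 0.5 for FMOV if it is representable.
double v = 0.5;
if (is_fp64_imm8(v)) {
  uint32_t imm8 = encode_fp64_to_imm8(v);
  // imm8 can now be placed into the instruction's 8-bit immediate field.
}
```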
} // {Utils}


@@ -21,7 +21,6 @@
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
#ifndef ASMJIT_CORE_H_INCLUDED
#define ASMJIT_CORE_H_INCLUDED
@@ -80,14 +79,14 @@ namespace asmjit {
//! - Make sure that you use \ref ErrorHandler, see \ref asmjit_error_handling.
//!
//! - Instruction validation in your debug builds can reveal problems too. AsmJit provides validation at instruction
//! level that can be enabled via \ref BaseEmitter::addDiagnosticOptions(). See \ref DiagnosticOptions for more
//! level that can be enabled via \ref BaseEmitter::add_diagnostic_options(). See \ref DiagnosticOptions for more
//! details.
//!
//! - If you are a Compiler user, use diagnostic options and read carefully if anything suspicious pops out.
//! Diagnostic options can be enabled via \ref BaseEmitter::addDiagnosticOptions(). If unsure which ones to use,
//! Diagnostic options can be enabled via \ref BaseEmitter::add_diagnostic_options(). If unsure which ones to use,
//! enable annotations and all debug options: `DiagnosticOptions::kRAAnnotate | DiagnosticOptions::kRADebugAll`.
//!
//! - Make sure you put a breakpoint into \ref DebugUtils::errored() function if you have a problem with AsmJit
//! - Make sure you put a breakpoint into \ref make_error() function if you have a problem with AsmJit
//! returning errors during instruction encoding or register allocation. Having an active breakpoint there can
//! help to reveal the origin of the error, to inspect variables and other conditions that caused it.
//!
@@ -270,7 +269,7 @@ namespace asmjit {
//! \defgroup asmjit_breaking_changes Breaking Changes
//! \brief Documentation of breaking changes
//! \brief Documentation of breaking changes.
//!
//! ### Overview
//!
@@ -280,25 +279,26 @@ namespace asmjit {
//! removed APIs and should serve as a how-to guide for people that want to port existing code to work with the
//! newest AsmJit.
//!
//! \section tips Tips
//!
//! Useful tips before you start:
//!
//! - Visit our [Public Chat](https://app.gitter.im/#/room/#asmjit:gitter.im) if you need quick help.
//!
//! - Build AsmJit with `ASMJIT_NO_DEPRECATED` macro defined to make sure that you are not using deprecated
//! functionality at all. Deprecated functions are decorated with `[[deprecated]]` attribute, but sometimes
//!   it's not possible to decorate everything, such as classes that deprecated functions still use,
//!   because some compilers would warn about that. If your project compiles fine with `ASMJIT_NO_DEPRECATED`,
//!   it's not using anything that will definitely be removed in the future.
//!
//! \section api_changes API Changes
//!
//! ### Changes committed at 2025-XX-XX
//!
//! Core changes:
//!
//! - Refactored AsmJit to use snake_case_function_names() instead of camelCasedFunctionNames().
//!
//! - Renamed Compiler's `newIntPtr()` and `newUIntPtr()` to `new_gp_ptr()`, or you should use just `new_gpz()`.
//!
//! - Renamed Zone to Arena (including containers) and merged Arena and ArenaAllocator into a single class.
//!
//! - Removed `Support::Temporary` in favor of `Span<uint8_t>`. `CodeHolder` and `Arena` now accept
//! `Span<uint8_t>` instead of `Support::Temporary`.
//!
//! ### Changes committed at 2025-06-15
//!
//! Core changes:
//!
//! - No more architecture specific \ref RegTraits - removed `BaseRegTraits` and kept just \ref RegTraits:
//! - No more architecture specific \ref RegTraits - removed `BaseRegTraits` and kept just RegTraits:
//!
//! - `BaseRegTraits` -> `RegTraits`
//! - `arm::RegTraits` -> `RegTraits`
@@ -307,7 +307,7 @@ namespace asmjit {
//! - Removed register signature and helper functions from ArchTraits. This functionality is now available
//! via asmjit::RegTraits and asmjit::RegUtils and doesn't require a valid architecture traits instance.
//!
//! - No more architecture specific Gp/Vec/Mask register types in \ref RegType and \ref RegGroup:
//! - No more architecture specific Gp/Vec/Mask register types in RegType and RegGroup:
//!
//! - `RegGroup::kX86_Rip` -> `RegGroup::kPC`
//! - `RegGroup::kX86_KReg` -> `RegGroup::kMask`
@@ -346,8 +346,8 @@ namespace asmjit {
//! - `regOp.type()` -> `regOp.regType()`
//! - `regOp.group()` -> `regOp.regGroup()`
//!
//! - Removed some static functions from \ref Operand, \reg Reg, etc... in favor of member functions. Most
//! of the operand functionality is now provided by \ref Operand_:
//! - Removed some static functions from \ref Operand, \ref Reg, etc... in favor of member functions. Most
//! of the operand functionality is now provided by \ref Operand_ class:
//!
//! - `Operand::isGp(op)` -> op.isGp();
//! - `x86::Reg::isGp(op, id)` -> op.isGp(id);
@@ -661,7 +661,6 @@ namespace asmjit {
//! AsmJit also provides global constants:
//!
//! - \ref Globals - namespace that provides global constants.
//! - \ref ByteOrder - byte-order constants and functionality.
//!
//! \note CodeHolder examples use \ref x86::Assembler as abstract interfaces cannot be used to generate code.
//!
@@ -683,7 +682,7 @@ namespace asmjit {
//!
//! CodeHolder code; // Holds code and relocation information.
//! code.init(rt.environment(), // Initialize code to match the JIT environment.
//! rt.cpuFeatures());
//! rt.cpu_features());
//!
//! x86::Assembler a(&code); // Create and attach x86::Assembler to code.
//! a.mov(x86::eax, 1); // Move one to eax register.
@@ -692,7 +691,10 @@ namespace asmjit {
//!
//! Func fn; // Holds address to the generated function.
//! Error err = rt.add(&fn, &code); // Add the generated code to the runtime.
//! if (err) return 1; // Handle a possible error returned by AsmJit.
//!
//! if (err != Error::kOk) {
//! return 1; // Handle a possible error returned by AsmJit.
//! }
//! // ===== CodeHolder is no longer needed from here and can be destroyed =====
//!
//! int result = fn(); // Execute the generated code.
@@ -747,7 +749,7 @@ namespace asmjit {
//!
//! // Create a custom environment initialized to 32-bit X86 architecture.
//! Environment env;
//! env.setArch(Arch::kX86);
//! env.set_arch(Arch::kX86);
//!
//! CodeHolder code; // Create a CodeHolder.
//! code.init(env); // Initialize CodeHolder with custom environment.
@@ -771,8 +773,8 @@ namespace asmjit {
//! // and CodeBuffer structure. We are interested in section's CodeBuffer.
//! //
//! // NOTE: The first section is always '.text', it can be retrieved by
//! // code.sectionById(0) or simply by code.textSection().
//! CodeBuffer& buffer = code.textSection()->buffer();
//! // code.section_by_id(0) or simply by code.text_section().
//! CodeBuffer& buffer = code.text_section()->buffer();
//!
//! // Print the machine-code generated or do something else with it...
//! // 8B4424048B4C24048B5424040F28010F58010F2900C3
@@ -797,17 +799,17 @@ namespace asmjit {
//! is stored in \ref LabelEntry and \ref CodeHolder as a singly-linked list. A fixup represents either a reference to an
//! unbound label or a cross-section reference (only relevant to code that uses multiple sections). Since crossing
//! sections is something that cannot be resolved immediately, these fixups persist until offsets of these sections are
//! assigned and until \ref CodeHolder::resolveCrossSectionFixups() is called. It's an error if you end up with code that
//! still has fixups after flattening. You can verify it by calling \ref CodeHolder::hasUnresolvedFixups(), which inspects
//! the value returned by \ref CodeHolder::unresolvedFixupCount().
//! assigned and until \ref CodeHolder::resolve_cross_section_fixups() is called. It's an error if you end up with code that
//! still has fixups after flattening. You can verify it by calling \ref CodeHolder::has_unresolved_fixups(), which inspects
//! the value returned by \ref CodeHolder::unresolved_fixup_count().
//!
//! AsmJit can flatten code that uses multiple sections by assigning each section an incrementing offset that respects
//! its alignment. Use \ref CodeHolder::flatten() to do that. After the sections are flattened their offsets and
//! virtual sizes are adjusted to respect each section's buffer size and alignment. The \ref
//! CodeHolder::resolveCrossSectionFixups() function must be called before relocating the code held by \ref CodeHolder.
//! CodeHolder::resolve_cross_section_fixups() function must be called before relocating the code held by \ref CodeHolder.
//! You can also flatten your code manually by iterating over all sections and calculating their offsets (relative to
//! base) by your own algorithm. In that case \ref CodeHolder::flatten() should not be called, however,
//! \ref CodeHolder::resolveCrossSectionFixups() should be.
//! \ref CodeHolder::resolve_cross_section_fixups() should be.
//!
//! The example below shows how to use a built-in virtual memory allocator \ref JitAllocator instead of using \ref
//! JitRuntime (just in case you want to use your own memory management) and how to relocate the generated code
@@ -828,10 +830,10 @@ namespace asmjit {
//! int main() {
//! // Create a custom environment that matches the current host environment.
//! Environment env = Environment::host();
//! CpuFeatures cpuFeatures = CpuInfo::host().features();
//! CpuFeatures cpu_features = CpuInfo::host().features();
//!
//! CodeHolder code; // Create a CodeHolder.
//! code.init(env, cpuFeatures); // Initialize CodeHolder with environment.
//! code.init(env, cpu_features); // Initialize CodeHolder with environment.
//!
//! x86::Assembler a(&code); // Create and attach x86::Assembler to `code`.
//!
@@ -842,7 +844,7 @@ namespace asmjit {
//!
//! // Handle the difference between 32-bit and 64-bit calling conventions
//! // (arguments passed through stack vs. arguments passed by registers).
//! if (env.is32Bit()) {
//! if (env.is_32bit()) {
//! dst = x86::eax;
//! src_a = x86::ecx;
//! src_b = x86::edx;
@@ -851,7 +853,7 @@ namespace asmjit {
//! a.mov(src_b, x86::dword_ptr(x86::esp, 12));
//! }
//! else {
//! if (env.isPlatformWindows()) {
//! if (env.is_platform_windows()) {
//! dst = x86::rcx; // First argument (destination pointer).
//! src_a = x86::rdx; // Second argument (source 'a' pointer).
//! src_b = x86::r8; // Third argument (source 'b' pointer).
@@ -873,18 +875,18 @@ namespace asmjit {
//! // called '.addrtab' (address table section), which would be filled by data
//! // required by relocations (absolute jumps and calls). You can omit this code
//! // if you are 100% sure your code doesn't contain multiple sections and
//! // such relocations. You can use `CodeHolder::hasAddressTable()` to verify
//! // whether the address table section does exist.
//! // such relocations. You can use `CodeHolder::has_address_table_section()` to
//! // verify whether the address table section does exist.
//! code.flatten();
//! code.resolveCrossSectionFixups();
//! code.resolve_cross_section_fixups();
//!
//! // After the code was generated it can be relocated manually to any memory
//! // location, however, we need to know its size before we perform memory
//! // allocation. `CodeHolder::codeSize()` returns the worst estimated code
//! // allocation. `CodeHolder::code_size()` returns the worst estimated code
//! // size in case that relocations are not possible without trampolines (in
//! // that case some extra code at the end of the current code buffer is
//! // generated during relocation).
//! size_t estimatedSize = code.codeSize();
//! size_t estimated_size = code.code_size();
//!
//! // Instead of rolling up our own memory allocator we can use the one AsmJit
//! // provides. It's decoupled so you don't need to use `JitRuntime` for that.
@@ -892,9 +894,9 @@ namespace asmjit {
//!
//! // Allocate an executable virtual memory and handle a possible failure.
//! JitAllocator::Span span;
//! Error err = allocator.alloc(span, estimatedSize);
//! Error err = allocator.alloc(span, estimated_size);
//!
//! if (err != kErrorOk) { // <- NOTE: This must be checked, always!
//! if (err != Error::kOk) { // <- NOTE: This must be checked, always!
//! return 0;
//! }
//!
@@ -902,13 +904,13 @@ namespace asmjit {
//! // Please note that this DOESN'T COPY anything to it. This function will
//! // store the address in CodeHolder and use relocation entries to patch
//! // the existing code in all sections to respect the base address provided.
//! code.relocateToBase((uint64_t)span.rx());
//! code.relocate_to_base((uint64_t)span.rx());
//!
//! // This is purely optional. There are cases in which the relocation can omit
//! // unneeded data, which would shrink the size of the address table. If that
//! // happened the codeSize returned after relocateToBase() would be smaller
//! // than the originally `estimatedSize`.
//! size_t codeSize = code.codeSize();
//! // happened the code_size returned after relocate_to_base() would be smaller
//! // than the original `estimated_size`.
//! size_t code_size = code.code_size();
//!
//! // This will copy code from all sections to `p`. Iterating over all sections
//! // and calling `memcpy()` would work as well, however, this function supports
@@ -917,24 +919,26 @@ namespace asmjit {
//! //
//! // With some additional features, copy_flattened_data() does roughly the following:
//! //
//! // allocator.write([&](JitAllocator::Span& span) {
//! // allocator.write([&](JitAllocator::Span& span) noexcept -> Error {
//! // for (Section* section : code.sections()) {
//! // uint8_t* p = (uint8_t*)span.rw() + section->offset();
//! // memcpy(p, section->data(), section->bufferSize());
//! // memcpy(p, section->data(), section->buffer_size());
//! // }
//! // return Error::kOk;
//! // }
//! allocator.write([&](JitAllocator::Span& span) {
//! code.copyFlattenedData(span.rw(), codeSize, CopySectionFlags::kPadSectionBuffer);
//! allocator.write([&](JitAllocator::Span& span) noexcept -> Error {
//! code.copy_flattened_data(span.rw(), code_size, CopySectionFlags::kPadSectionBuffer);
//! return Error::kOk;
//! });
//!
//! // Execute the generated function.
//! int inA[4] = { 4, 3, 2, 1 };
//! int inB[4] = { 1, 5, 2, 8 };
//! int in_a[4] = { 4, 3, 2, 1 };
//! int in_b[4] = { 1, 5, 2, 8 };
//! int out[4];
//!
//! // This code uses AsmJit's ptr_as_func<> to cast between void* and SumIntsFunc.
//! SumIntsFunc fn = ptr_as_func<SumIntsFunc>(span.rx());
//! fn(out, inA, inB);
//! fn(out, in_a, in_b);
//!
//! // Prints {5 8 4 9}
//! printf("{%d %d %d %d}\n", out[0], out[1], out[2], out[3]);
@@ -959,13 +963,13 @@ namespace asmjit {
//!
//! using namespace asmjit;
//!
//! void initializeCodeHolder(CodeHolder& code) {
//! void initialize_code_holder_example(CodeHolder& code) {
//! Environment env = Environment::host();
//! CpuFeatures cpuFeatures = CpuInfo::host().features();
//! uint64_t baseAddress = uint64_t(0x1234);
//! CpuFeatures cpu_features = CpuInfo::host().features();
//! uint64_t base_address = uint64_t(0x1234);
//!
//! // initialize CodeHolder with environment and custom base address.
//! code.init(env, cpuFeatures, baseAddress);
//! code.init(env, cpu_features, base_address);
//! }
//! ```
//!
@@ -981,17 +985,17 @@ namespace asmjit {
//!
//! using namespace asmjit;
//!
//! void labelLinksExample(CodeHolder& code, const Label& label) {
//! void label_links_example(CodeHolder& code, const Label& label) {
//! // Tests whether the `label` is bound.
//! bool isBound = code.isLabelBound(label);
//! printf("Label %u is %s\n", label.id(), isBound ? "bound" : "not bound");
//! bool is_bound = code.is_label_bound(label);
//! printf("Label %u is %s\n", label.id(), is_bound ? "bound" : "not bound");
//!
//! // Returns true if the code contains either referenced, but unbound
//! // labels, or cross-section fixups that are not resolved yet.
//! bool hasUnresolved = code.hasUnresolvedFixups(); // Boolean answer.
//! size_t nUnresolved = code.unresolvedFixupCount(); // Count of unresolved fixups.
//! bool has_unresolved = code.has_unresolved_fixups(); // Boolean answer.
//! size_t n_unresolved = code.unresolved_fixup_count(); // Count of unresolved fixups.
//!
//! printf("Number of unresolved fixups: %zu\n", nUnresolved);
//! printf("Number of unresolved fixups: %zu\n", n_unresolved);
//! }
//! ```
//!
@@ -1011,15 +1015,15 @@ namespace asmjit {
//! // to the start of the section, see below for an alternative. If the given
//! // label is not bound, the offset returned will be zero. It's recommended
//! // to always check whether the label is bound before using its offset.
//! uint64_t sectionOffset = code.labelOffset(label);
//! printf("Label offset relative to section: %llu\n", (unsigned long long)sectionOffset);
//! uint64_t section_offset = code.label_offset(label);
//! printf("Label offset relative to section: %llu\n", (unsigned long long)section_offset);
//!
//! // If you use multiple sections and want the offset relative to the base.
//! // NOTE: This function expects that the section already has an offset and
//! // that the label-link was resolved (if this is not true you will still
//! // get an offset relative to the start of the section).
//! uint64_t baseOffset = code.labelOffsetFromBase(label);
//! printf("Label offset relative to base: %llu\n", (unsigned long long)baseOffset);
//! uint64_t base_offset = code.label_offset_from_base(label);
//! printf("Label offset relative to base: %llu\n", (unsigned long long)base_offset);
//! }
//! ```
//!
@@ -1035,13 +1039,13 @@ namespace asmjit {
//!
//! using namespace asmjit;
//!
//! void sectionsExample(CodeHolder& code) {
//! void sections_example(CodeHolder& code) {
//! // Text section is always provided as the first section.
//! Section* text = code.textSection(); // or code.sectionById(0);
//! Section* text = code.text_section(); // or code.section_by_id(0);
//!
//! // To create another section use CodeHolder::newSection().
//! // To create another section use CodeHolder::new_section().
//! Section* data;
//! Error err = code.newSection(&data,
//! Error err = code.new_section(Out(data),
//! ".data", // Section name
//! SIZE_MAX, // Name length, or SIZE_MAX if the name is null terminated.
//! SectionFlags::kNone, // Section flags, see SectionFlags.
@@ -1053,7 +1057,7 @@ namespace asmjit {
//! // the cursor would be placed at the end of the first (.text) section, which
//! // is initially empty.
//! x86::Assembler a(&code);
//! Label L_Data = a.newLabel();
//! Label L_Data = a.new_label();
//!
//! a.mov(x86::eax, x86::ebx); // Emits in .text section.
//!
@@ -1082,34 +1086,33 @@ namespace asmjit {
//! using namespace asmjit;
//!
//! // ... (continuing the previous example) ...
//! void sectionsExampleContinued(CodeHolder& code) {
//! void sections_example_continued(CodeHolder& code) {
//! // Suppose we have some code that contains multiple sections and
//! // we would like to flatten it by using AsmJit's built-in API:
//! Error err = code.flatten();
//! if (err) {
//! if (err != Error::kOk) {
//! // There are many reasons it can fail, so always handle a possible error.
//! printf("Failed to flatten the code: %s\n", DebugUtils::errorAsString(err));
//! printf("Failed to flatten the code: %s\n", DebugUtils::error_as_string(err));
//! exit(1);
//! }
//!
//! // After flattening all sections would contain assigned offsets
//! // relative to base. Offsets are 64-bit unsigned integers so we
//! // cast them to `size_t` for simplicity. On 32-bit targets it's
//! // guaranteed that the offset cannot be greater than `2^32 - 1`.
//! // After flattening, all sections contain assigned offsets relative to the base.
//! // Offsets are 64-bit unsigned integers so we cast them to `size_t` for simplicity.
//! // On 32-bit targets it's guaranteed that the offset cannot be greater than `2^32 - 1`.
//! printf("Data section offset %zu", size_t(data->offset()));
//!
//! // Flattening doesn't resolve cross-section fixups; this has
//! // to be done manually, as flattening can be done separately.
//! err = code.resolveCrossSectionFixups();
//! if (err) {
//! err = code.resolve_cross_section_fixups();
//! if (err != Error::kOk) {
//! // This is the kind of error that should always be handled...
//! printf("Failed to resolve fixups: %s\n", DebugUtils::errorAsString(err));
//! printf("Failed to resolve fixups: %s\n", DebugUtils::error_as_string(err));
//! exit(1);
//! }
//!
//! if (code.hasUnresolvedFixups()) {
//! if (code.has_unresolved_fixups()) {
//! // This would mean either unbound label or some other issue.
//! printf("The code has %zu unbound labels\n", code.unresolvedFixupCount());
//! printf("The code has %zu unbound labels\n", code.unresolved_fixup_count());
//! exit(1);
//! }
//! }
@@ -1135,9 +1138,10 @@ namespace asmjit {
//! - \ref UniVec - Universal abstraction of a vector register, inherited by:
//! - \ref x86::Vec - Vector register operand specific to X86 and X86_64 architectures.
//! - \ref a64::Vec - Vector register operand specific to AArch64 architecture.
//! - \ref x86::Mm, \ref x86::KReg, \ref x86::Tmm, and other architecture specific register operands.
//! - \ref BaseMem - Base class for a memory operand, inherited by:
//! - \ref x86::Mem - Memory operand specific to X86 and X86_64 architectures.
//! - \ref arm::Mem - Memory operand specific to AArch64 architecture.
//! - \ref a64::Mem - Memory operand specific to AArch64 architecture.
//! - \ref Imm - Immediate (value) operand.
//! - \ref Label - Label operand.
//!
@@ -1161,18 +1165,23 @@ namespace asmjit {
//! are commonly accessible by getters and setters:
//!
//! - \ref Operand - Base operand, which only provides accessors that are common to all operand types.
//! - \ref BaseReg - Describes either physical or virtual register. Physical registers have id that matches the
//! target's machine id directly whereas virtual registers must be allocated into physical registers by a register
//! allocator pass. Register operand provides:
//! - \ref Reg - Describes either physical or virtual register. Physical registers have ids that match the target's
//! machine id directly whereas virtual registers must be allocated into physical registers by a register allocator
//! pass. Register operand provides:
//! - Register Type (\ref RegType) - Unique id that describes each possible register provided by the target
//! architecture - for example X86 backend provides general purpose registers (GPB-LO, GPB-HI, GPW, GPD, and GPQ)
//! and all types of other registers like K, MM, BND, XMM, YMM, ZMM, and TMM.
//! and various types of other registers like K, MM, BND, XMM, YMM, ZMM, and TMM.
//! - Register Group (\ref RegGroup) - Groups multiple register types under a single group - for example all
//! general-purpose registers (of all sizes) on X86 are part of \ref RegGroup::kGp and all SIMD registers
//! (XMM, YMM, ZMM) are part of \ref RegGroup::kVec.
//! - Register Size - Contains the size of the register in bytes. If the size depends on the mode (32-bit vs
//! 64-bit) then generally the higher size is used (for example RIP register has size 8 by default).
//! - Register Id - Contains physical or virtual id of the register.
//! - Unified interface of general purpose registers is provided by \ref UniGp, which acts as a base of
//! all architecture specific GP registers such as \ref x86::Gp and \ref a64::Gp.
//! - Unified interface of vector registers is provided by \ref UniVec, which acts as a base of all architecture
//! specific vector registers such as \ref x86::Vec and \ref a64::Vec. Please note that X86 MMX registers are
//! not part of \ref x86::Vec, instead they are modeled as \ref x86::Mm.
//! - \ref BaseMem - Used to reference a memory location. Memory operand provides:
//! - Base Register - A base register type and id (physical or virtual).
//! - Index Register - An index register type and id (physical or virtual).
@@ -1213,28 +1222,28 @@ namespace asmjit {
//! x86::Mem m = x86::ptr(src, idx);
//!
//! // Examine `m`: Returns `RegType::kGp64`.
//! m.indexType();
//! m.index_type();
//! // Examine `m`: Returns 10 (`r10`).
//! m.indexId();
//! m.index_id();
//!
//! // Reconstruct `idx` stored in mem:
//! x86::Gp idx_2 = x86::Gp::fromTypeAndId(m.indexType(), m.indexId());
//! x86::Gp idx_2 = x86::Gp::from_type_and_id(m.index_type(), m.index_id());
//!
//! // True, `idx` and idx_2` are identical.
//! // True, `idx` and `idx_2` are identical.
//! idx == idx_2;
//!
//! // Possible - op will still be the same as `m`.
//! Operand op = m;
//! // True (can be casted to BaseMem or architecture-specific Mem).
//! op.isMem();
//! op.is_mem();
//!
//! // True, `op` is just a copy of `m`.
//! m == op;
//!
//! // Static cast is fine and valid here.
//! static_cast<BaseMem&>(op).addOffset(1);
//! static_cast<BaseMem&>(op).add_offset(1);
//! // However, using `as<T>()` to cast to a derived type is preferred.
//! op.as<BaseMem>().addOffset(1);
//! op.as<BaseMem>().add_offset(1);
//! // False, `op` now points to [rax + r10 + 2], which is not [rax + r10].
//! m == op;
//!
@@ -1251,7 +1260,7 @@ namespace asmjit {
//! ```
//!
//! Some operands have to be created explicitly by emitters. For example labels must be created by \ref
//! BaseEmitter::newLabel(), which creates a label entry and returns a \ref Label operand with the id that refers
//! BaseEmitter::new_label(), which creates a label entry and returns a \ref Label operand with the id that refers
//! to it. Such a label can then be used by emitters, as the sketch below shows.
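//!
//! A minimal sketch of that flow (assuming an x86::Assembler `a` attached to an initialized \ref CodeHolder):
//!
//! ```
//! using namespace asmjit;
//!
//! void label_example(x86::Assembler& a) {
//! Label L_Loop = a.new_label(); // Creates a label entry and returns a Label operand.
//!
//! a.bind(L_Loop); // Binds the label to the current position.
//! a.dec(x86::ecx);
//! a.jnz(L_Loop); // Uses the label as a jump target.
//! }
//! ```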
//!
//! \section memory_operands Memory Operands
@@ -1334,19 +1343,18 @@ namespace asmjit {
//! // The same as: dword ptr [rax + 12].
//! x86::Mem mem = x86::dword_ptr(x86::rax, 12);
//!
//! mem.hasBase(); // true.
//! mem.hasIndex(); // false.
//! mem.has_base(); // true.
//! mem.has_index(); // false.
//! mem.size(); // 4.
//! mem.offset(); // 12.
//!
//! mem.setSize(0); // Sets the size to 0 (makes it size-less).
//! mem.addOffset(-1); // Adds -1 to the offset and makes it 11.
//! mem.setOffset(0); // Sets the offset to 0.
//! mem.setBase(x86::rcx); // Changes BASE to RCX.
//! mem.setIndex(x86::rax); // Changes INDEX to RAX.
//! mem.hasIndex(); // true.
//! mem.set_size(0); // Sets the size to 0 (makes it size-less).
//! mem.add_offset(-1); // Adds -1 to the offset and makes it 11.
//! mem.set_offset(0); // Sets the offset to 0.
//! mem.set_base(x86::rcx); // Changes BASE to RCX.
//! mem.set_index(x86::rax); // Changes INDEX to RAX.
//! mem.has_index(); // true.
//! }
//! // ...
//! ```
//!
//! Making changes to a memory operand is very convenient when emitting loads
@@ -1375,13 +1383,13 @@ namespace asmjit {
//! x86::Mem mDst = mSrc.clone(); // Clone mSrc.
//!
//! a.movaps(mDst, x86::xmm0); // Stores xmm0 to [eax].
//! mDst.addOffset(16); // Adds 16 to `mDst`.
//! mDst.add_offset(16); // Adds 16 to `mDst`.
//!
//! a.movaps(mDst, x86::xmm1); // Stores to [eax + 16].
//! mDst.addOffset(16); // Adds 16 to `mDst`.
//! mDst.add_offset(16); // Adds 16 to `mDst`.
//!
//! a.movaps(mDst, x86::xmm2); // Stores to [eax + 32].
//! mDst.addOffset(16); // Adds 16 to `mDst`.
//! mDst.add_offset(16); // Adds 16 to `mDst`.
//!
//! a.movaps(mDst, x86::xmm3); // Stores to [eax + 48].
//! }
@@ -1487,7 +1495,7 @@ namespace asmjit {
//! and they have to be implemented in the same way.
//!
//! - Compiler provides a useful debugging functionality, which can be turned on through \ref FormatFlags. Use
//! \ref Logger::addFlags() to turn on additional logging features when using Compiler.
//! \ref Logger::add_flags() to turn on additional logging features when using Compiler.
//! \defgroup asmjit_function Function
@@ -1560,7 +1568,7 @@ namespace asmjit {
//!
//! - \ref FormatOptions - Formatting options that can change how instructions and operands are formatted.
//!
//! - \ref Formatter - A namespace that provides functions that can format input data like \ref Operand, \ref BaseReg,
//! - \ref Formatter - A namespace that provides functions that can format input data like \ref Operand, \ref Reg,
//! \ref Label, and \ref BaseNode into \ref String.
//!
//! AsmJit's \ref Logger serves the following purposes:
@@ -1595,8 +1603,8 @@ namespace asmjit {
//!
//! CodeHolder code; // Holds code and relocation information.
//! code.init(rt.environment(), // Initialize code to match the JIT environment.
//! rt.cpuFeatures());
//! code.setLogger(&logger); // Attach the `logger` to `code` holder.
//! rt.cpu_features());
//! code.set_logger(&logger); // Attach the `logger` to `code` holder.
//!
//! // ... code as usual, everything emitted will be logged to `stdout` ...
//! return 0;
@@ -1619,8 +1627,8 @@ namespace asmjit {
//!
//! CodeHolder code; // Holds code and relocation information.
//! code.init(rt.environment(), // Initialize code to match the JIT environment.
//! rt.cpuFeatures());
//! code.setLogger(&logger); // Attach the `logger` to `code` holder.
//! rt.cpu_features());
//! code.set_logger(&logger); // Attach the `logger` to `code` holder.
//!
//! // ... code as usual, logging will be concatenated to logger string ...
//!
@@ -1656,10 +1664,10 @@ namespace asmjit {
//! BaseEmitter* emitter = nullptr;
//!
//! // No flags by default.
//! FormatFlags formatFlags = FormatFlags::kNone;
//! FormatFlags format_flags = FormatFlags::kNone;
//!
//! StringTmp<128> sb;
//! Formatter::formatOperand(sb, formatFlags, emitter, arch, op);
//! Formatter::format_operand(sb, format_flags, emitter, arch, op);
//! printf("%s\n", sb.data());
//! }
//!
@@ -1693,14 +1701,14 @@ namespace asmjit {
//! BaseEmitter* emitter = nullptr;
//!
//! // No flags by default.
//! FormatFlags formatFlags = FormatFlags::kNone;
//! FormatFlags format_flags = FormatFlags::kNone;
//!
//! // The formatter expects operands in an array.
//! Operand_ operands[] { std::forward<Args>(args)... };
//!
//! StringTmp<128> sb;
//! Formatter::formatInstruction(
//! sb, formatFlags, emitter, arch, inst, operands, sizeof...(args));
//! Formatter::format_instruction(
//! sb, format_flags, emitter, arch, inst, operands, sizeof...(args));
//! printf("%s\n", sb.data());
//! }
//!
@@ -1720,7 +1728,7 @@ namespace asmjit {
//! BaseInst(Inst::kIdVaddpd),
//! zmm0, zmm1, ptr(rax)._1to8());
//!
//! // BaseInst abstracts instruction id, instruction options, and extraReg.
//! // BaseInst abstracts instruction id, instruction options, and extra_reg.
//! // Prints 'lock add [rax], rcx'.
//! logInstruction(arch,
//! BaseInst(Inst::kIdAdd, InstOptions::kX86_Lock),
@@ -1744,7 +1752,7 @@ namespace asmjit {
//! using namespace asmjit;
//!
//! void formattingExample(BaseBuilder* builder) {
//! FormatOptions formatOptions {};
//! FormatOptions format_options {};
//!
//! // This also shows how temporary strings can be used.
//! StringTmp<512> sb;
@@ -1753,7 +1761,7 @@ namespace asmjit {
//! // were zero (no extra flags), and the builder instance, which we have
//! // provided. An overloaded version also exists, which accepts begin and
//! // end nodes, which can be used to format only a range of nodes.
//! Formatter::formatNodeList(sb, formatOptions, builder);
//! Formatter::format_node_list(sb, format_options, builder);
//!
//! // You can do whatever else with the string; it's always null terminated,
//! // so it can be passed to C functions like printf().
@@ -1769,7 +1777,7 @@ namespace asmjit {
//!
//! AsmJit uses error codes to represent and return errors. Every function that can fail returns an \ref Error code.
//! Exceptions are never thrown by AsmJit itself even in extreme conditions like out-of-memory, but it's possible to
//! override \ref ErrorHandler::handleError() to throw, in that case no error will be returned and exception will be
//! override \ref ErrorHandler::handle_error() to throw; in that case no error will be returned and an exception will be
//! thrown instead. All functions where this can happen are not marked `noexcept`.
//!
//! Errors should never be ignored; however, checking errors after each AsmJit API call would simply over-complicate
@@ -1780,11 +1788,11 @@ namespace asmjit {
//! - Throw an exception. AsmJit doesn't use exceptions and is completely exception-safe, but it's perfectly legal
//! to throw an exception from the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler, Builder and Compiler into a
//! consistent state before calling \ref ErrorHandler::handleError(), so `longjmp()` can be used without issues
//! consistent state before calling \ref ErrorHandler::handle_error(), so `longjmp()` can be used without issues
//! to cancel the code-generation if an error occurred. This method can be used if exception handling in your
//! project is turned off and you still want some comfort. In most cases it should be safe as AsmJit uses \ref
//! Zone memory and the ownership of memory it allocates always ends with the instance that allocated it. If
//! using this approach please never jump outside the life-time of \ref CodeHolder and \ref BaseEmitter.
//! Arena allocated memory and the ownership of memory it allocates always ends with the instance that allocated
//! it. If using this approach, please never jump outside the lifetime of \ref CodeHolder and \ref BaseEmitter; a
//! minimal sketch follows below.
//!
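//! A hedged sketch of the `longjmp()` approach; the handler class and the jump buffer are illustrative and not
//! part of AsmJit:
//!
//! ```
//! #include <asmjit/core.h>
//! #include <csetjmp>
//! #include <cstdio>
//!
//! using namespace asmjit;
//!
//! static std::jmp_buf jit_jmp_buf; // Illustrative jump buffer.
//!
//! class LongJmpErrorHandler : public ErrorHandler {
//! public:
//! void handle_error(Error err, const char* message, BaseEmitter* origin) override {
//! (void)err;
//! (void)origin;
//! printf("AsmJit error: %s\n", message);
//! std::longjmp(jit_jmp_buf, 1); // Emitters are left in a consistent state here.
//! }
//! };
//! ```
//!
//! The call site would then guard code generation with `if (setjmp(jit_jmp_buf) == 0) { ... }`.
//!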
//! \section using_error_handler Using ErrorHandler
//!
@@ -1799,7 +1807,7 @@ namespace asmjit {
//! // A simple error handler implementation, extend according to your needs.
//! class MyErrorHandler : public ErrorHandler {
//! public:
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! void handle_error(Error err, const char* message, BaseEmitter* origin) override {
//! printf("AsmJit error: %s\n", message);
//! }
//! };
@@ -1810,8 +1818,8 @@ namespace asmjit {
//! MyErrorHandler myErrorHandler;
//! CodeHolder code;
//!
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&myErrorHandler);
//! code.init(rt.environment(), rt.cpu_features());
//! code.set_error_handler(&myErrorHandler);
//!
//! x86::Assembler a(&code);
//! // ... code generation ...
@@ -1854,10 +1862,10 @@ namespace asmjit {
//!
//! The instruction query API is provided by \ref InstAPI namespace. The following queries are possible:
//!
//! - \ref InstAPI::queryRWInfo() - queries read/write information of the given instruction and its operands.
//! - \ref InstAPI::query_rw_info() - queries read/write information of the given instruction and its operands.
//! Includes also CPU flags read/written.
//!
//! - \ref InstAPI::queryFeatures() - queries CPU features that are required to execute the given instruction. A full
//! - \ref InstAPI::query_features() - queries CPU features that are required to execute the given instruction. A full
//! instruction with operands must be given as some architectures like X86 may require different features for the
//! same instruction based on its operands.
//!
@@ -1872,7 +1880,7 @@ namespace asmjit {
//! - \ref InstAPI::validate() - low-level instruction validation function that is used internally by emitters
//! if strict validation is enabled.
//!
//! - \ref BaseEmitter::addDiagnosticOptions() - can be used to enable validation at emitter level, see \ref
//! - \ref BaseEmitter::add_diagnostic_options() - can be used to enable validation at emitter level, see \ref
//! DiagnosticOptions.
@@ -1883,19 +1891,19 @@ namespace asmjit {
//!
//! AsmJit's virtual memory management is divided into three main categories:
//!
//! - Low level interface that provides cross-platform abstractions for virtual memory allocation. Implemented in
//! \ref VirtMem namespace. This API is a thin wrapper around operating system specific calls such as
//! `VirtualAlloc()` and `mmap()` and it's intended to be used by AsmJit's higher level API. Low-level virtual
//! memory functions can be used to allocate virtual memory, change its permissions, and to release it.
//! Additionally, an API that allows to create dual mapping (to support hardened environments) is provided.
//! - \ref VirtMem namespace provides low level interface that can be used for cross-platform virtual memory
//! allocation. This API is a thin wrapper around operating system specific calls such as `VirtualAlloc()` and
//! `mmap()` and it's intended to be used by AsmJit's higher level API. Low-level virtual memory functions can
//! be used to allocate virtual memory, change its permissions, and to release it. Additionally, an API that
//! allows creating dual mappings (to support hardened environments) is provided.
//!
//! - Middle level API that is provided by \ref JitAllocator, which uses \ref VirtMem internally and offers nicer
//! API that can be used by users to allocate executable memory conveniently. \ref JitAllocator tries to be smart,
//! for example automatically using dual mapping or `MAP_JIT` on hardened environments.
//! - \ref JitAllocator provides a middle level API, which is built on top of \ref VirtMem and offers a
//! nicer API that can be used by users to allocate executable memory conveniently. \ref JitAllocator tries to
//! be smart, for example automatically using dual mapping or `MAP_JIT` on hardened environments.
//!
//! - High level API that is provided by \ref JitRuntime, which implements \ref Target interface and uses \ref
//! JitAllocator under the hood. Since \ref JitRuntime inherits from \ref Target it makes it easy to use with
//! \ref CodeHolder. Many AsmJit examples use \ref JitRuntime for its simplicity and easy integration.
//! - \ref JitRuntime provides a high level API, which implements the \ref Target interface and uses \ref JitAllocator
//! under the hood. Since \ref JitRuntime inherits from \ref Target, it is easy to use with \ref CodeHolder.
//! Many AsmJit examples use \ref JitRuntime for its simplicity and easy integration.
//!
//! The main difference between \ref VirtMem and \ref JitAllocator is that \ref VirtMem can only be used to allocate
//! whole pages, whereas \ref JitAllocator has a `malloc()`-like API that allows allocating smaller quantities that
@@ -1934,81 +1942,78 @@ namespace asmjit {
//! Dual mapping is provided by both \ref VirtMem and \ref JitAllocator.
//! \defgroup asmjit_zone Zone Memory
//! \brief Zone memory allocator and containers.
//! \defgroup asmjit_support Support
//! \brief Provides utility functions, arena allocator, and arena-backed containers.
//!
//! ### Overview
//!
//! AsmJit uses zone memory allocation (also known as Arena allocation) to allocate most of the data it uses. It's a
//! fast allocator that allows AsmJit to allocate a lot of small data structures fast and without `malloc()` overhead.
//! Since code generators and all related classes are usually short-lived this approach decreases memory usage and
//! fragmentation as arena-based allocators always allocate larger blocks of memory, which are then split into smaller
//! chunks.
//! This functionality is primarily intended for AsmJit's internal use, but it is exposed to users since it appears
//! in public headers as well. \ref Arena and arena-backed containers are used by many public AsmJit classes, and
//! AsmJit doesn't try to hide their use.
//!
//! Another advantage of zone memory allocation is that since the whole library uses this strategy it's very easy to
//! deallocate everything that a particular instance is holding by simply releasing the memory the allocator holds.
//! This improves destruction time of such objects as there is no destruction at all. Long-lived objects just reset
//! its data in destructor or in their reset() member function for a future reuse. For this purpose all containers in
//! AsmJit are also zone allocated.
//! The arena allocator is used for most allocations within AsmJit. It is optimized for fast allocation of small objects,
//! avoiding the overhead of `malloc()`. Memory is managed in large blocks that are split into smaller chunks, reducing
//! fragmentation and improving performance.
//!
//! \section zone_allocation Zone Allocation
//! Releasing an arena allocator invalidates all memory it holds, allowing efficient cleanup without per-object
//! destruction. Long-lived objects typically reset their data in the destructor or via `reset()` for later reuse.
//! All AsmJit containers use the \ref Arena allocator.
//!
//! - \ref Zone - Incremental zone memory allocator with minimum features. It can only allocate memory without the
//! possibility to return it back to the allocator.
//! \section arena_allocators Arena Allocators
//!
//! - \ref ZoneTmp - A temporary \ref Zone with some initial static storage. If the allocation requests fit the
//! static storage allocated then there will be no dynamic memory allocation during the lifetime of \ref ZoneTmp,
//! otherwise it would act as \ref Zone with one preallocated block on the stack.
//! - \ref Arena - Arena memory allocator that quickly allocates the requested memory from larger chunks and then
//! frees everything at once. AsmJit uses Arena allocators almost everywhere as almost everything is short-lived.
//!
//! - \ref ZoneAllocator - A wrapper of \ref Zone that provides the capability of returning memory to the allocator.
//! Such memory is stored in a pool for later reuse.
//! - \ref ArenaTmp - A temporary \ref Arena with some initial static storage. If the allocation requests fit the
//! static storage allocated then there will be no dynamic memory allocation during the lifetime of \ref ArenaTmp,
//! otherwise it would act as \ref Arena with one preallocated block at the beginning.
//!
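//! A hedged sketch of direct \ref Arena use; the constructor argument and the `alloc()` call mirror the former
//! Zone API and are assumptions here:
//!
//! ```
//! using namespace asmjit;
//!
//! void arena_example() {
//! Arena arena(4096); // Initial block size (assumed constructor argument).
//!
//! // Allocation is essentially a pointer bump within the current block (assumed API).
//! void* p = arena.alloc(64);
//!
//! // No per-allocation free - destroying or resetting the arena releases everything at once.
//! }
//! ```
//!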
//! \section zone_containers Zone Allocated Containers
//! \section arena_containers Arena-Allocated Containers
//!
//! - \ref ZoneString - Zone allocated string.
//! - \ref ZoneHash - Zone allocated hash table.
//! - \ref ZoneTree - Zone allocated red-black tree.
//! - \ref ZoneList - Zone allocated double-linked list.
//! - \ref ZoneStack - Zone allocated stack.
//! - \ref ZoneVector - Zone allocated vector.
//! - \ref ZoneBitVector - Zone allocated vector of bits.
//! - \ref ArenaString - Arena allocated string.
//! - \ref ArenaHash - Arena allocated hash table.
//! - \ref ArenaTree - Arena allocated red-black tree.
//! - \ref ArenaList - Arena allocated double-linked list.
//! - \ref ArenaVector - Arena allocated vector.
//!
//! \section using_zone_containers Using Zone Allocated Containers
//! \section using_arena_containers Using Arena-Allocated Containers
//!
//! The most common data structure exposed by AsmJit is \ref ZoneVector. It's very similar to `std::vector`, but the
//! implementation doesn't use exceptions and uses the mentioned \ref ZoneAllocator for performance reasons. You don't
//! have to worry about allocations as you should not need to add items to AsmJit's data structures directly as there
//! should be API for all required operations.
//! The most common data structure exposed by AsmJit is \ref ArenaVector. It's very similar to `std::vector`, but the
//! implementation doesn't use exceptions and uses the mentioned \ref Arena allocator for increased performance and
//! decreased memory footprint. You don't have to worry about allocations, as you should never need to add items to
//! AsmJit's data structures directly; the public API covers all required operations.
//!
//! The following APIs in \ref CodeHolder returns \ref ZoneVector reference:
//! Most of the time, AsmJit returns a non-owning \ref Span instead of a reference to an internal container when it
//! returns an array of something. For example, the following APIs in \ref CodeHolder return a non-owning \ref Span instance:
//!
//! ```
//! using namespace asmjit;
//!
//! void example(CodeHolder& code) {
//! // Contains all section entries managed by CodeHolder.
//! const ZoneVector<Section*>& sections = code.sections();
//! Span<Section*> sections = code.sections();
//!
//! // Contains all label entries managed by CodeHolder.
//! const ZoneVector<LabelEntry>& labelEntries = code.labelEntries();
//! Span<LabelEntry> label_entries = code.label_entries();
//!
//! // Contains all relocation entries managed by CodeHolder.
//! const ZoneVector<RelocEntry*>& relocEntries = code.relocEntries();
//! Span<RelocEntry*> reloc_entries = code.reloc_entries();
//! }
//! ```
//!
//! \ref ZoneVector has overloaded array access operator to make it possible to access its elements through operator[].
//! Some standard functions like \ref ZoneVector::empty(), \ref ZoneVector::size(), and \ref ZoneVector::data() are
//! \ref Span has an overloaded array access operator to make it possible to access its elements through operator[].
//! Some standard functions like \ref Span::is_empty(), \ref Span::size(), and \ref Span::data() are
//! provided as well. Spans are also iterable through a range-based for loop:
//!
//! ```
//! using namespace asmjit;
//!
//! void example(CodeHolder& code) {
//! for (uint32_t labelId = 0; labelId < code.labelCount(); labelId++) {
//! const LabelEntry& le = code.labelEntry(labelId);
//! if (le.isBound()) {
//! printf("Bound Label #%u at offset=%llu\n", labelId, (unsigned long long)le->offset());
//! Span<LabelEntry> label_entries = code.label_entries();
//! for (size_t label_id = 0; label_id < label_entries.size(); label_id++) {
//! const LabelEntry& le = label_entries[label_id];
//! if (le.is_bound()) {
//! printf("Bound Label #%u at offset=%llu\n", uint32_t(label_id), (unsigned long long)le.offset());
//! }
//! }
//! }
@@ -2016,53 +2021,53 @@ namespace asmjit {
//!
//! \section design_considerations Design Considerations
//!
//! Zone-allocated containers do not store the allocator within the container. This decision was made to reduce the
//! Arena-allocated containers do not store the allocator within the container. This decision was made to reduce the
//! footprint of such containers, as AsmJit tooling, especially Compiler's register allocation, may use many instances
//! of such containers to perform code analysis and register allocation.
//!
//! For example to append an item into a \ref ZoneVector it's required to pass the allocator as the first argument,
//! so it can be used in case that the vector needs a reallocation. Such function also returns an error, which must
//! be propagated to the caller.
//! For example, to append an item into an \ref ArenaVector it's required to pass the allocator as the first argument,
//! so it can be used in case the vector needs to grow. Such a function also returns an error, which must be
//! propagated to the caller.
//!
//! ```
//! using namespace asmjit
//! using namespace asmjit;
//!
//! Error example(ZoneAllocator* allocator) {
//! ZoneVector<int> vector;
//! Error example(Arena& arena) {
//! ArenaVector<int> vector;
//!
//! // Unfortunately, allocator must be provided to all functions that mutate
//! // Unfortunately, arena must be provided to all functions that mutate
//! // the vector. However, AsmJit users should never need to do this as all
//! // manipulation should be done through public API, which takes care of
//! // that.
//! // this.
//! for (int i = 0; i < 100; i++) {
//! ASMJIT_PROPAGATE(vector.append(allocator, i));
//! ASMJIT_PROPAGATE(vector.append(arena, i));
//! }
//!
//! // By default, the vector's destructor doesn't release anything as it knows
//! // that its content is zone allocated. However, \ref ZoneVector::release
//! // can be used to explicitly release the vector data to the allocator if
//! // that its content is allocated by Arena. However, \ref ArenaVector::release
//! // can be used to explicitly release the vector data back to the allocator if
//! // necessary.
//! vector.release(allocator);
//! vector.release(arena);
//! return Error::kOk;
//! }
//! ```
//!
//! Containers like \ref ZoneVector also provide a functionality to reserve a certain number of items before any items
//! Containers like \ref ArenaVector also provide functionality to reserve a certain number of items before any items
//! are added to them. This approach is used internally in most places as it allows preparing space for data before
//! the data itself is created.
//!
//! ```
//! using namespace asmjit
//! using namespace asmjit;
//!
//! Error example(ZoneAllocator* allocator) {
//! ZoneVector<int> vector;
//! Error example(Arena& arena) {
//! ArenaVector<int> vector;
//!
//! ASMJIT_PROPAGATE(vector.willGrow(100));
//! ASMJIT_PROPAGATE(vector.reserve_additional(arena, 100));
//! for (int i = 0; i < 100; i++) {
//! // Cannot fail.
//! vector.appendUnsafe(allocator, i);
//! vector.append_unchecked(arena, i);
//! }
//!
//! vector.release(allocator);
//! vector.release(arena);
//! return Error::kOk;
//! }
//! ```
@@ -2106,6 +2111,9 @@ namespace asmjit {
//! \defgroup asmjit_a64 AArch64 Backend
//! \brief AArch64 backend.
//! \defgroup asmjit_ujit UJIT
//! \brief Universal JIT - abstracts X86|X86_64 and AArch64 code generation.
//! \cond INTERNAL
//! \defgroup asmjit_ra RA
//! \brief Register allocator internals.
@@ -2135,18 +2143,19 @@ namespace asmjit {
#include "core/logger.h"
#include "core/operand.h"
#include "core/osutils.h"
#include "core/span.h"
#include "core/string.h"
#include "core/support.h"
#include "core/target.h"
#include "core/type.h"
#include "core/virtmem.h"
#include "core/zone.h"
#include "core/zonehash.h"
#include "core/zonelist.h"
#include "core/zonetree.h"
#include "core/zonestack.h"
#include "core/zonestring.h"
#include "core/zonevector.h"
#include "core/arena.h"
#include "core/arenahash.h"
#include "core/arenalist.h"
#include "core/arenapool.h"
#include "core/arenatree.h"
#include "core/arenastring.h"
#include "core/arenavector.h"
#include "asmjit-scope-end.h"
#endif // ASMJIT_CORE_H_INCLUDED

View File

@@ -48,7 +48,7 @@
#endif
#include "./api-config.h"
#include "../core/api-config.h"
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(__clang__)
#define ASMJIT_FAVOR_SIZE __attribute__((__optimize__("Os")))

View File

@@ -16,7 +16,7 @@
#define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch))
//! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference.
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 17, 0)
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 18, 0)
//! \def ASMJIT_ABI_NAMESPACE
//!
@@ -27,7 +27,7 @@
//! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, totally
//! controlled by users. This is useful especially in cases in which some of such library comes from third party.
#if !defined(ASMJIT_ABI_NAMESPACE)
#define ASMJIT_ABI_NAMESPACE v1_17
#define ASMJIT_ABI_NAMESPACE v1_18
#endif // !ASMJIT_ABI_NAMESPACE
//! \}
@@ -54,96 +54,129 @@
// Build Options
// =============
#if defined(_DOXYGEN)
// NOTE: Doxygen cannot document macros that are not defined; that's why we have to define them and then undefine
// them immediately, so Doxygen won't use the macros with its own preprocessor.
#ifdef _DOXYGEN
namespace asmjit {
//! \addtogroup asmjit_build
//! \{
//! \def ASMJIT_EMBED
//!
//! Asmjit is embedded, implies \ref ASMJIT_STATIC.
#define ASMJIT_EMBED
#undef ASMJIT_EMBED
//! \def ASMJIT_STATIC
//!
//! Enables static-library build.
#define ASMJIT_STATIC
#undef ASMJIT_STATIC
//! \def ASMJIT_BUILD_DEBUG
//!
//! Defined when AsmJit's build configuration is 'Debug'.
//!
//! \note Can be defined explicitly to bypass auto-detection.
#define ASMJIT_BUILD_DEBUG
#undef ASMJIT_BUILD_DEBUG
//! \def ASMJIT_BUILD_RELEASE
//!
//! Defined when AsmJit's build configuration is 'Release'.
//!
//! \note Can be defined explicitly to bypass auto-detection.
#define ASMJIT_BUILD_RELEASE
//! Disables deprecated API at compile time (deprecated API won't be available).
#define ASMJIT_NO_DEPRECATED
//! Disables the use of an inline ABI namespace within asmjit namespace (the inline namespace is used as an ABI tag).
#define ASMJIT_NO_ABI_NAMESPACE
//! Disables X86/X64 backends.
#define ASMJIT_NO_X86
//! Disables AArch64 backend.
#define ASMJIT_NO_AARCH64
//! Disables the use of `shm_open` on all targets even when it's supported.
#define ASMJIT_NO_SHM_OPEN
//! Disables JIT memory management and \ref asmjit::JitRuntime.
#define ASMJIT_NO_JIT
//! Disables \ref asmjit::Logger and \ref asmjit::Formatter.
#define ASMJIT_NO_LOGGING
//! Disables everything that contains text.
#define ASMJIT_NO_TEXT
//! Disables instruction validation API.
#define ASMJIT_NO_VALIDATION
//! Disables instruction introspection API.
#define ASMJIT_NO_INTROSPECTION
//! Disables non-host backends entirely (useful for JIT compilers to minimize the library size).
#define ASMJIT_NO_FOREIGN
//! Disables \ref asmjit_builder functionality completely.
#define ASMJIT_NO_BUILDER
//! Disables \ref asmjit_compiler functionality completely.
#define ASMJIT_NO_COMPILER
// Avoid doxygen preprocessor using feature-selection definitions.
#undef ASMJIT_BUILD_EMBED
#undef ASMJIT_BUILD_STATIC
#undef ASMJIT_BUILD_DEBUG
#undef ASMJIT_BUILD_RELEASE
// (keep ASMJIT_NO_DEPRECATED defined, we don't document deprecated APIs).
//! \def ASMJIT_NO_DEPRECATED
//!
//! Disables deprecated API at compile time (deprecated API won't be available).
#define ASMJIT_NO_DEPRECATED
#undef ASMJIT_NO_DEPRECATED
//! \def ASMJIT_NO_ABI_NAMESPACE
//!
//! Disables the use of an inline ABI namespace within asmjit namespace (the inline namespace is used as an ABI tag).
#define ASMJIT_NO_ABI_NAMESPACE
#undef ASMJIT_NO_ABI_NAMESPACE
//! \def ASMJIT_NO_X86
//!
//! Disables X86/X64 backends.
#define ASMJIT_NO_X86
#undef ASMJIT_NO_X86
//! \def ASMJIT_NO_AARCH64
//!
//! Disables AArch64 backend.
#define ASMJIT_NO_AARCH64
#undef ASMJIT_NO_AARCH64
//! \def ASMJIT_NO_SHM_OPEN
//!
//! Disables the use of `shm_open` on all targets even when it's supported.
#define ASMJIT_NO_SHM_OPEN
#undef ASMJIT_NO_SHM_OPEN
//! \def ASMJIT_NO_JIT
//!
//! Disables JIT memory management and \ref asmjit::JitRuntime.
#define ASMJIT_NO_JIT
#undef ASMJIT_NO_JIT
//! \def ASMJIT_NO_LOGGING
//!
//! Disables \ref asmjit::Logger and \ref asmjit::Formatter.
#define ASMJIT_NO_LOGGING
#undef ASMJIT_NO_LOGGING
//! \def ASMJIT_NO_TEXT
//!
//! Disables everything that contains text.
#define ASMJIT_NO_TEXT
#undef ASMJIT_NO_TEXT
//! \def ASMJIT_NO_VALIDATION
//!
//! Disables instruction validation API.
#define ASMJIT_NO_VALIDATION
#undef ASMJIT_NO_VALIDATION
//! \def ASMJIT_NO_INTROSPECTION
//!
//! Disables instruction introspection API.
#define ASMJIT_NO_INTROSPECTION
#undef ASMJIT_NO_INTROSPECTION
//! \def ASMJIT_NO_FOREIGN
//!
//! Disables non-host backends entirely (useful for JIT compilers to minimize the library size).
#define ASMJIT_NO_FOREIGN
#undef ASMJIT_NO_FOREIGN
#undef ASMJIT_NO_JIT
#undef ASMJIT_NO_LOGGING
#undef ASMJIT_NO_TEXT
#undef ASMJIT_NO_VALIDATION
#undef ASMJIT_NO_INTROSPECTION
//! \def ASMJIT_NO_BUILDER
//!
//! Disables \ref asmjit_builder functionality completely.
#define ASMJIT_NO_BUILDER
#undef ASMJIT_NO_BUILDER
//! \def ASMJIT_NO_COMPILER
//!
//! Disables \ref asmjit_compiler functionality completely.
#define ASMJIT_NO_COMPILER
#undef ASMJIT_NO_COMPILER
//! \def ASMJIT_NO_UJIT
//!
//! Disables \ref asmjit_ujit functionality completely.
#define ASMJIT_NO_UJIT
#undef ASMJIT_NO_UJIT
//! \}
} // {asmjit}
#endif // _DOXYGEN
//! \cond
// ASMJIT_NO_BUILDER implies ASMJIT_NO_COMPILER.
#if defined(ASMJIT_NO_BUILDER) && !defined(ASMJIT_NO_COMPILER)
#define ASMJIT_NO_COMPILER
@@ -165,9 +198,14 @@ namespace asmjit {
#undef ASMJIT_NO_INTROSPECTION
#endif
//! \endcond
#endif // _DOXYGEN
// Build Mode
// ==========
//! \cond
// Detect ASMJIT_BUILD_DEBUG and ASMJIT_BUILD_RELEASE if not defined.
#if !defined(ASMJIT_BUILD_DEBUG) && !defined(ASMJIT_BUILD_RELEASE)
#if !defined(NDEBUG)
@@ -176,6 +214,7 @@ namespace asmjit {
#define ASMJIT_BUILD_RELEASE
#endif
#endif
//! \endcond
// Target Architecture Detection
// =============================
@@ -183,33 +222,37 @@ namespace asmjit {
//! \addtogroup asmjit_core
//! \{
#if defined(_DOXYGEN)
//! \def ASMJIT_ARCH_X86
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is X86 (32) or X86_64 (64).
#define ASMJIT_ARCH_X86 __detected_at_runtime__
//! \def ASMJIT_ARCH_ARM
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is ARM (32) or AArch64 (64).
#define ASMJIT_ARCH_ARM __detected_at_runtime__
//! \def ASMJIT_ARCH_MIPS
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is MIPS (32) or MIPS64 (64).
#define ASMJIT_ARCH_MIPS __detected_at_runtime__
//! \def ASMJIT_ARCH_RISCV
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is RV32 (32) or RV64 (64).
#define ASMJIT_ARCH_RISCV __detected_at_runtime__
//! \def ASMJIT_ARCH_LA
//!
//! Defined to either 0, 32, or 64 depending on whether the target CPU is 32-bit or 64-bit LoongArch.
#define ASMJIT_ARCH_LA __detected_at_runtime__
//! \def ASMJIT_ARCH_BITS
//!
//! Defined to either 32 or 64 depending on the target.
//! \def ASMJIT_ARCH_LE
//!
//! Defined to 1 if the target architecture is little endian.
//! \def ASMJIT_ARCH_BE
//!
//! Defined to 1 if the target architecture is big endian.
#define ASMJIT_ARCH_BITS __detected_at_runtime__(32 | 64)
//! \def ASMJIT_HAS_HOST_BACKEND
//!
@@ -217,10 +260,9 @@ namespace asmjit {
//!
//! For example if AsmJit is building for x86 or x86_64 architectures and `ASMJIT_NO_X86` is not defined,
//! it would define `ASMJIT_HAS_HOST_BACKEND` when `<asmjit/code.h>` or `<asmjit/host.h>` is included.
#define ASMJIT_HAS_HOST_BACKEND __detected_at_runtime__
//! \}
//! \cond NONE
#else
#if defined(_M_X64) || defined(__x86_64__)
#define ASMJIT_ARCH_X86 64
@@ -246,15 +288,21 @@ namespace asmjit {
#define ASMJIT_ARCH_MIPS 0
#endif
#if (defined(__riscv) || defined(__riscv__)) && defined(__riscv_xlen)
// NOTE `__riscv` is the correct macro in this case as specified by "RISC-V Toolchain Conventions".
#define ASMJIT_ARCH_RISCV __riscv_xlen
#else
#define ASMJIT_ARCH_RISCV 0
#endif
#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS | ASMJIT_ARCH_RISCV)
#if ASMJIT_ARCH_BITS == 0
#if defined(__loongarch__) && defined(__loongarch_grlen)
#define ASMJIT_ARCH_LA __loongarch_grlen
#else
#define ASMJIT_ARCH_LA 0
#endif
#define ASMJIT_ARCH_BITS (ASMJIT_ARCH_X86 | ASMJIT_ARCH_ARM | ASMJIT_ARCH_MIPS | ASMJIT_ARCH_RISCV | ASMJIT_ARCH_LA)
#if ASMJIT_ARCH_BITS == 0 && !defined(_DOXYGEN)
#undef ASMJIT_ARCH_BITS
#if defined(__LP64__) || defined(_LP64)
#define ASMJIT_ARCH_BITS 64
@@ -263,16 +311,6 @@ namespace asmjit {
#endif
#endif
#if (defined(__ARMEB__)) || \
(defined(__MIPSEB__)) || \
(defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__))
#define ASMJIT_ARCH_LE 0
#define ASMJIT_ARCH_BE 1
#else
#define ASMJIT_ARCH_LE 1
#define ASMJIT_ARCH_BE 0
#endif
#if defined(ASMJIT_NO_FOREIGN)
#if !ASMJIT_ARCH_X86 && !defined(ASMJIT_NO_X86)
#define ASMJIT_NO_X86
@@ -301,7 +339,9 @@ namespace asmjit {
#endif
#endif
//! \endcond
#endif // _DOXYGEN
//! \}
// C++ Compiler and Features Detection
// ===================================
@@ -318,9 +358,11 @@ namespace asmjit {
//! \addtogroup asmjit_core
//! \{
#if defined(_DOXYGEN)
//! \def ASMJIT_API
//!
//! A decorator that is used to decorate API that AsmJit exports when built as a shared library.
#define ASMJIT_API
//! \def ASMJIT_VIRTAPI
//!
@@ -328,54 +370,89 @@ namespace asmjit {
//! is unwanted in most projects. MSVC automatically exports typeinfo and vtable if at least one symbol of the class
//! is exported. However, GCC has some strange behavior that even if one or more symbol is exported it doesn't export
//! typeinfo unless the class itself is decorated with "visibility(default)" (i.e. ASMJIT_API).
#define ASMJIT_VIRTAPI
//! \def ASMJIT_INLINE
//!
//! Decorator to force inlining of functions, uses either `__attribute__((__always_inline__))` or __forceinline,
//! depending on C++ compiler.
#define ASMJIT_INLINE inline
//! \def ASMJIT_INLINE_NODEBUG
//!
//! Like \ref ASMJIT_INLINE, but uses additionally `__nodebug__` or `__artificial__` attribute to make the
//! debugging of some AsmJit functions easier, especially getters and one-line abstractions where usually you don't
//! want to step in.
#define ASMJIT_INLINE_NODEBUG inline
//! \def ASMJIT_INLINE_CONSTEXPR
//!
//! Like \ref ASMJIT_INLINE_NODEBUG, but having an additional `constexpr` attribute.
#define ASMJIT_INLINE_CONSTEXPR inline constexpr
//! \def ASMJIT_NOINLINE
//!
//! Decorator to avoid inlining of functions, uses either `__attribute__((__noinline__))` or `__declspec(noinline)`
//! depending on C++ compiler.
#define ASMJIT_NOINLINE
//! \def ASMJIT_CDECL
//!
//! CDECL function attribute - either `__attribute__((__cdecl__))` or `__cdecl`.
#define ASMJIT_CDECL
//! \def ASMJIT_STDCALL
//!
//! STDCALL function attribute - either `__attribute__((__stdcall__))` or `__stdcall`.
//!
//! \note This expands to nothing on non-x86 targets as STDCALL is X86 specific.
#define ASMJIT_STDCALL
//! \def ASMJIT_FASTCALL
//!
//! FASTCALL function attribute - either `__attribute__((__fastcall__))` or `__fastcall`.
//!
//! \note Expands to nothing on non-x86 targets as FASTCALL is X86 specific.
#define ASMJIT_FASTCALL
//! \def ASMJIT_REGPARM(N)
//!
//! Expands to `__attribute__((__regparm__(N)))` when compiled by GCC or clang, nothing otherwise.
#define ASMJIT_REGPARM(N)
//! \def ASMJIT_VECTORCALL
//!
//! VECTORCALL function attribute - either `__attribute__((__vectorcall__))` or `__vectorcall`.
//!
//! \note Expands to nothing on non-x86 targets as VECTORCALL is X86 specific.
#define ASMJIT_VECTORCALL
//! \}
//! \def ASMJIT_MAY_ALIAS
//!
//! Expands to `__attribute__((__may_alias__))` if supported.
#define ASMJIT_MAY_ALIAS
//! \def ASMJIT_ASSUME(...)
//!
//! Macro that tells the C/C++ compiler that the expression `...` evaluates to true.
//!
//! This macro has two purposes:
//!
//! 1. Enable optimizations that would not be possible without the assumption.
//! 2. Hint static analysis tools that a certain condition is true to prevent false positives.
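//!
//! For example, a hedged sketch:
//!
//! ```
//! void fill_bytes(uint8_t* p, size_t n) {
//! ASMJIT_ASSUME(n > 0); // Tells the compiler the loop below executes at least once.
//! for (size_t i = 0; i < n; i++) p[i] = 0;
//! }
//! ```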
#define ASMJIT_ASSUME(...)
//! \def ASMJIT_LIKELY(...)
//!
//! Condition is likely to be taken (mostly error handling and edge cases).
#define ASMJIT_LIKELY(...)
//! \def ASMJIT_UNLIKELY(...)
//!
//! Condition is unlikely to be taken (mostly error handling and edge cases).
#define ASMJIT_UNLIKELY(...)
#else
// API (Export / Import).
#if !defined(ASMJIT_STATIC)
@@ -411,18 +488,18 @@ namespace asmjit {
#endif
// Function attributes.
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__)
#if !defined(ASMJIT_BUILD_DEBUG) && defined(__GNUC__) && !defined(_DOXYGEN)
#define ASMJIT_INLINE inline __attribute__((__always_inline__))
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER)
#elif !defined(ASMJIT_BUILD_DEBUG) && defined(_MSC_VER) && !defined(_DOXYGEN)
#define ASMJIT_INLINE __forceinline
#else
#define ASMJIT_INLINE inline
#endif
#if defined(__clang__)
#if defined(__clang__) && !defined(_DOXYGEN)
#define ASMJIT_INLINE_NODEBUG inline __attribute__((__always_inline__, __nodebug__))
#elif defined(__GNUC__)
#elif defined(__GNUC__) && !defined(_DOXYGEN)
#define ASMJIT_INLINE_NODEBUG inline __attribute__((__always_inline__, __artificial__))
#else
#define ASMJIT_INLINE_NODEBUG inline
@@ -464,18 +541,14 @@ namespace asmjit {
#define ASMJIT_VECTORCALL
#endif
// Type alignment (not allowed by C++17 'alignas' keyword).
#if defined(__GNUC__)
#define ASMJIT_ALIGN_TYPE(N, ...) __attribute__((__aligned__(N))) __VA_ARGS__
#define ASMJIT_ALIGNAS(ALIGNMENT) __attribute__((__aligned__(ALIGNMENT)))
#elif defined(_MSC_VER)
#define ASMJIT_ALIGN_TYPE(N, ...) __declspec(align(N)) __VA_ARGS__
#define ASMJIT_ALIGNAS(ALIGNMENT) __declspec(align(ALIGNMENT))
#else
#define ASMJIT_ALIGN_TYPE(N, ...) __VA_ARGS__
#define ASMJIT_ALIGNAS(ALIGNMENT) alignas(ALIGNMENT)
#endif
//! \def ASMJIT_MAY_ALIAS
//!
//! Expands to `__attribute__((__may_alias__))` if supported.
#if defined(__GNUC__)
#define ASMJIT_MAY_ALIAS __attribute__((__may_alias__))
#else
@@ -491,14 +564,6 @@ namespace asmjit {
#define ASMJIT_NONNULL(FUNCTION_ARGUMENT) FUNCTION_ARGUMENT
#endif
//! \def ASMJIT_ASSUME(...)
//!
//! Macro that tells the C/C++ compiler that the expression `...` evaluates to true.
//!
//! This macro has two purposes:
//!
//! 1. Enable optimizations that would not be possible without the assumption.
//! 2. Hint static analysis tools that a certain condition is true to prevent false positives.
#if defined(__clang__)
#define ASMJIT_ASSUME(...) __builtin_assume(__VA_ARGS__)
#elif defined(__GNUC__)
@@ -509,13 +574,6 @@ namespace asmjit {
#define ASMJIT_ASSUME(...) (void)0
#endif
//! \def ASMJIT_LIKELY(...)
//!
//! Condition is likely to be taken (mostly error handling and edge cases).
//! \def ASMJIT_UNLIKELY(...)
//!
//! Condition is unlikely to be taken (mostly error handling and edge cases).
#if defined(__GNUC__)
#define ASMJIT_LIKELY(...) __builtin_expect(!!(__VA_ARGS__), 1)
#define ASMJIT_UNLIKELY(...) __builtin_expect(!!(__VA_ARGS__), 0)
@@ -536,6 +594,10 @@ namespace asmjit {
#define ASMJIT_ATTRIBUTE_NO_SANITIZE_UNDEF
#endif
#endif // _DOXYGEN
//! \}
// Diagnostic Macros
// ======================================
@@ -589,7 +651,7 @@ namespace asmjit {
//! \def ASMJIT_DEFINE_ENUM_FLAGS(T)
//!
//! Defines bit operations for enumeration flags.
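//!
//! A hedged usage sketch (the enum is illustrative; the macro provides the usual bitwise operators):
//!
//! ```
//! enum class MyFlags : uint32_t {
//! kNone = 0,
//! kA = 0x1,
//! kB = 0x2
//! };
//! ASMJIT_DEFINE_ENUM_FLAGS(MyFlags)
//!
//! MyFlags f = MyFlags::kA | MyFlags::kB; // operator| is provided by the macro.
//! f &= ~MyFlags::kB; // operator&= and operator~ as well.
//! ```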
#ifdef _DOXYGEN
#if defined(_DOXYGEN)
#define ASMJIT_DEFINE_ENUM_FLAGS(T)
#else
#define ASMJIT_DEFINE_ENUM_FLAGS(T) \
@@ -624,7 +686,7 @@ namespace asmjit {
//! \def ASMJIT_DEFINE_ENUM_COMPARE(T)
//!
//! Defines comparison operations for enumeration flags.
#if defined(_DOXYGEN) || (defined(_MSC_VER) && _MSC_VER <= 1900)
#if defined(_DOXYGEN)
#define ASMJIT_DEFINE_ENUM_COMPARE(T)
#else
#define ASMJIT_DEFINE_ENUM_COMPARE(T) \
@@ -642,56 +704,4 @@ namespace asmjit {
}
#endif
//! Defines a strong type `C` that wraps a value of `T`.
#define ASMJIT_DEFINE_STRONG_TYPE(C, T) \
struct C { \
T v; \
\
ASMJIT_INLINE_NODEBUG C() = default; \
ASMJIT_INLINE_CONSTEXPR explicit C(T x) noexcept : v(x) {} \
ASMJIT_INLINE_CONSTEXPR C(const C& other) noexcept = default; \
\
ASMJIT_INLINE_CONSTEXPR T value() const noexcept { return v; } \
\
ASMJIT_INLINE_CONSTEXPR T* valuePtr() noexcept { return &v; } \
ASMJIT_INLINE_CONSTEXPR const T* valuePtr() const noexcept { return &v; } \
\
ASMJIT_INLINE_CONSTEXPR C& operator=(T x) noexcept { v = x; return *this; }; \
ASMJIT_INLINE_CONSTEXPR C& operator=(const C& x) noexcept { v = x.v; return *this; } \
\
ASMJIT_INLINE_CONSTEXPR C operator+(T x) const noexcept { return C(v + x); } \
ASMJIT_INLINE_CONSTEXPR C operator-(T x) const noexcept { return C(v - x); } \
ASMJIT_INLINE_CONSTEXPR C operator*(T x) const noexcept { return C(v * x); } \
ASMJIT_INLINE_CONSTEXPR C operator/(T x) const noexcept { return C(v / x); } \
\
ASMJIT_INLINE_CONSTEXPR C operator+(const C& x) const noexcept { return C(v + x.v); } \
ASMJIT_INLINE_CONSTEXPR C operator-(const C& x) const noexcept { return C(v - x.v); } \
ASMJIT_INLINE_CONSTEXPR C operator*(const C& x) const noexcept { return C(v * x.v); } \
ASMJIT_INLINE_CONSTEXPR C operator/(const C& x) const noexcept { return C(v / x.v); } \
\
ASMJIT_INLINE_CONSTEXPR C& operator+=(T x) noexcept { v += x; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator-=(T x) noexcept { v -= x; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator*=(T x) noexcept { v *= x; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator/=(T x) noexcept { v /= x; return *this; } \
\
ASMJIT_INLINE_CONSTEXPR C& operator+=(const C& x) noexcept { v += x.v; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator-=(const C& x) noexcept { v -= x.v; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator*=(const C& x) noexcept { v *= x.v; return *this; } \
ASMJIT_INLINE_CONSTEXPR C& operator/=(const C& x) noexcept { v /= x.v; return *this; } \
\
ASMJIT_INLINE_CONSTEXPR bool operator==(T x) const noexcept { return v == x; } \
ASMJIT_INLINE_CONSTEXPR bool operator!=(T x) const noexcept { return v != x; } \
ASMJIT_INLINE_CONSTEXPR bool operator> (T x) const noexcept { return v > x; } \
ASMJIT_INLINE_CONSTEXPR bool operator>=(T x) const noexcept { return v >= x; } \
ASMJIT_INLINE_CONSTEXPR bool operator< (T x) const noexcept { return v < x; } \
ASMJIT_INLINE_CONSTEXPR bool operator<=(T x) const noexcept { return v <= x; } \
\
ASMJIT_INLINE_CONSTEXPR bool operator==(const C& x) const noexcept { return v == x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator!=(const C& x) const noexcept { return v != x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator> (const C& x) const noexcept { return v > x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator>=(const C& x) const noexcept { return v >= x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator< (const C& x) const noexcept { return v < x.v; } \
ASMJIT_INLINE_CONSTEXPR bool operator<=(const C& x) const noexcept { return v <= x.v; } \
};
#endif // ASMJIT_CORE_API_CONFIG_H_INCLUDED

View File

@@ -80,7 +80,7 @@ enum class CondCode : uint8_t {
//! \cond
static constexpr CondCode _reverseCondTable[] = {
static constexpr CondCode _reverse_cond_table[] = {
CondCode::kAL, // AL <- AL
CondCode::kNA, // NA <- NA
CondCode::kEQ, // EQ <- EQ
@@ -102,11 +102,11 @@ static constexpr CondCode _reverseCondTable[] = {
//! Reverses a condition code (reverses the corresponding operands of a comparison).
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode reverseCond(CondCode cond) noexcept { return _reverseCondTable[uint8_t(cond)]; }
static ASMJIT_INLINE_CONSTEXPR CondCode reverse_cond(CondCode cond) noexcept { return _reverse_cond_table[uint8_t(cond)]; }
//! Negates a condition code.
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR CondCode negateCond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
static ASMJIT_INLINE_CONSTEXPR CondCode negate_cond(CondCode cond) noexcept { return CondCode(uint8_t(cond) ^ uint8_t(1)); }
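// Illustration (hedged): the CondCode enum pairs each condition with its negation in the lowest bit, so:
//
// negate_cond(CondCode::kEQ) == CondCode::kNE; // Negation flips the pairing bit.
// reverse_cond(CondCode::kGT) == CondCode::kLT; // Reversal swaps the compared operands.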
//! Memory offset mode.
//!
@@ -195,14 +195,14 @@ public:
ASMJIT_INLINE_CONSTEXPR ShiftOp op() const noexcept { return _op; }
//! Sets shift operation to `op`.
ASMJIT_INLINE_NODEBUG void setOp(ShiftOp op) noexcept { _op = op; }
ASMJIT_INLINE_NODEBUG void set_op(ShiftOp op) noexcept { _op = op; }
//! Returns the shift amount.
[[nodiscard]]
ASMJIT_INLINE_CONSTEXPR uint32_t value() const noexcept { return _value; }
//! Sets shift amount to `value`.
ASMJIT_INLINE_NODEBUG void setValue(uint32_t value) noexcept { _value = value; }
ASMJIT_INLINE_NODEBUG void set_value(uint32_t value) noexcept { _value = value; }
};
//! \}
@@ -254,7 +254,7 @@ enum class DataType : uint32_t {
kMaxValue = 15
};
static ASMJIT_INLINE_NODEBUG uint32_t dataTypeSize(DataType dt) noexcept {
static ASMJIT_INLINE_NODEBUG uint32_t data_type_size(DataType dt) noexcept {
static constexpr uint8_t table[] = { 0, 1, 2, 4, 8, 1, 2, 4, 8, 2, 4, 8, 1, 2, 8 };
return table[size_t(dt)];
}

View File

@@ -18,7 +18,7 @@
ASMJIT_BEGIN_NAMESPACE
static const constexpr ArchTraits noArchTraits = {
static const constexpr ArchTraits no_arch_traits = {
// SP/FP/LR/PC.
0xFFu, 0xFFu, 0xFFu, 0xFFu,
@@ -56,112 +56,112 @@ static const constexpr ArchTraits noArchTraits = {
}
};
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1] = {
ASMJIT_VARAPI const ArchTraits _arch_traits[uint32_t(Arch::kMaxValue) + 1] = {
// No architecture.
noArchTraits,
no_arch_traits,
// X86/X86 architectures.
#if !defined(ASMJIT_NO_X86)
x86::x86ArchTraits,
x86::x64ArchTraits,
x86::x86_arch_traits,
x86::x64_arch_traits,
#else
noArchTraits,
noArchTraits,
no_arch_traits,
no_arch_traits,
#endif
// RISCV32/RISCV64 architectures.
noArchTraits,
noArchTraits,
no_arch_traits,
no_arch_traits,
// ARM architecture
noArchTraits,
no_arch_traits,
// AArch64 architecture.
#if !defined(ASMJIT_NO_AARCH64)
a64::a64ArchTraits,
a64::a64_arch_traits,
#else
noArchTraits,
no_arch_traits,
#endif
// ARM/Thumb architecture.
noArchTraits,
no_arch_traits,
// Reserved.
noArchTraits,
no_arch_traits,
// MIPS32/MIPS64
noArchTraits,
noArchTraits
no_arch_traits,
no_arch_traits
};
ASMJIT_FAVOR_SIZE Error ArchUtils::typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept {
const ArchTraits& archTraits = ArchTraits::byArch(arch);
ASMJIT_FAVOR_SIZE Error ArchUtils::type_id_to_reg_signature(Arch arch, TypeId type_id, Out<TypeId> type_id_out, Out<OperandSignature> reg_signature_out) noexcept {
const ArchTraits& arch_traits = ArchTraits::by_arch(arch);
// TODO: Remove this, should never be used like this.
// Passed RegType instead of TypeId?
if (uint32_t(typeId) <= uint32_t(RegType::kMaxValue)) {
typeId = RegUtils::typeIdOf(RegType(uint32_t(typeId)));
if (uint32_t(type_id) <= uint32_t(RegType::kMaxValue)) {
type_id = RegUtils::type_id_of(RegType(uint32_t(type_id)));
}
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(typeId))) {
return DebugUtils::errored(kErrorInvalidTypeId);
if (ASMJIT_UNLIKELY(!TypeUtils::is_valid(type_id))) {
return make_error(Error::kInvalidTypeId);
}
// First normalize architecture dependent types.
if (TypeUtils::isAbstract(typeId)) {
bool is32Bit = Environment::is32Bit(arch);
if (typeId == TypeId::kIntPtr) {
typeId = is32Bit ? TypeId::kInt32 : TypeId::kInt64;
if (TypeUtils::is_abstract(type_id)) {
bool is_32bit = Environment::is_32bit(arch);
if (type_id == TypeId::kIntPtr) {
type_id = is_32bit ? TypeId::kInt32 : TypeId::kInt64;
}
else {
typeId = is32Bit ? TypeId::kUInt32 : TypeId::kUInt64;
type_id = is_32bit ? TypeId::kUInt32 : TypeId::kUInt64;
}
}
// Type size helps to construct all groups of registers.
// TypeId is invalid if the size is zero.
uint32_t size = TypeUtils::sizeOf(typeId);
uint32_t size = TypeUtils::size_of(type_id);
if (ASMJIT_UNLIKELY(!size)) {
return DebugUtils::errored(kErrorInvalidTypeId);
return make_error(Error::kInvalidTypeId);
}
if (ASMJIT_UNLIKELY(typeId == TypeId::kFloat80)) {
return DebugUtils::errored(kErrorInvalidUseOfF80);
if (ASMJIT_UNLIKELY(type_id == TypeId::kFloat80)) {
return make_error(Error::kInvalidUseOfF80);
}
RegType regType = RegType::kNone;
if (TypeUtils::isBetween(typeId, TypeId::_kBaseStart, TypeId::_kVec32Start)) {
regType = archTraits._typeIdToRegType[uint32_t(typeId) - uint32_t(TypeId::_kBaseStart)];
if (regType == RegType::kNone) {
if (typeId == TypeId::kInt64 || typeId == TypeId::kUInt64) {
return DebugUtils::errored(kErrorInvalidUseOfGpq);
RegType reg_type = RegType::kNone;
if (TypeUtils::is_between(type_id, TypeId::_kBaseStart, TypeId::_kVec32Start)) {
reg_type = arch_traits._type_id_to_reg_type[uint32_t(type_id) - uint32_t(TypeId::_kBaseStart)];
if (reg_type == RegType::kNone) {
if (type_id == TypeId::kInt64 || type_id == TypeId::kUInt64) {
return make_error(Error::kInvalidUseOfGpq);
}
else {
return DebugUtils::errored(kErrorInvalidTypeId);
return make_error(Error::kInvalidTypeId);
}
}
}
else {
if (size <= 8 && archTraits.hasRegType(RegType::kVec64)) {
regType = RegType::kVec64;
if (size <= 8 && arch_traits.has_reg_type(RegType::kVec64)) {
reg_type = RegType::kVec64;
}
else if (size <= 16 && archTraits.hasRegType(RegType::kVec128)) {
regType = RegType::kVec128;
else if (size <= 16 && arch_traits.has_reg_type(RegType::kVec128)) {
reg_type = RegType::kVec128;
}
else if (size == 32 && archTraits.hasRegType(RegType::kVec256)) {
regType = RegType::kVec256;
else if (size == 32 && arch_traits.has_reg_type(RegType::kVec256)) {
reg_type = RegType::kVec256;
}
else if (archTraits.hasRegType(RegType::kVec512)) {
regType = RegType::kVec512;
else if (arch_traits.has_reg_type(RegType::kVec512)) {
reg_type = RegType::kVec512;
}
else {
return DebugUtils::errored(kErrorInvalidTypeId);
return make_error(Error::kInvalidTypeId);
}
}
*typeIdOut = typeId;
*regSignatureOut = RegUtils::signatureOf(regType);
return kErrorOk;
*type_id_out = type_id;
*reg_signature_out = RegUtils::signature_of(reg_type);
return Error::kOk;
}
ASMJIT_END_NAMESPACE
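For illustration, a minimal usage sketch of the rewritten helper with its new `Out<>` parameters, mirroring the abstract-type normalization above (a hypothetical caller, not part of the patch; assumes `Arch::kX64` and a default-constructible `OperandSignature`):

```
// Hypothetical sketch: normalize the abstract kIntPtr for a 64-bit target.
#include <asmjit/core.h>
using namespace asmjit;

static Error pick_reg_for_intptr() {
  TypeId normalized {};     // Receives the normalized type id.
  OperandSignature sig {};  // Receives the register signature.

  // On X64 the abstract kIntPtr is expected to normalize to kInt64.
  Error err = ArchUtils::type_id_to_reg_signature(
    Arch::kX64, TypeId::kIntPtr, Out(normalized), Out(sig));

  if (err != Error::kOk) {
    return err;
  }

  // `normalized` is now TypeId::kInt64 and `sig` describes a 64-bit GP register.
  return Error::kOk;
}
```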


@@ -37,7 +37,8 @@ enum class Arch : uint8_t {
//! 32-bit ARM ISA in Thumb mode (little endian).
kThumb = 7,
// 8 is not used at the moment, even numbers are 64-bit architectures.
//! 64-bit LoongArch.
kLA64 = 8,
//! 32-bit MIPS ISA (little endian).
kMIPS32_LE = 9,
@@ -51,8 +52,6 @@ enum class Arch : uint8_t {
//! 32-bit ARM ISA in Thumb mode (big endian).
kThumb_BE = 13,
// 14 is not used at the moment, even numbers are 64-bit architectures.
//! 32-bit MIPS ISA (big endian).
kMIPS32_BE = 15,
//! 64-bit MIPS ISA (big endian).
@@ -77,15 +76,17 @@ enum class Arch : uint8_t {
ASMJIT_ARCH_RISCV == 32 ? kRISCV32 :
ASMJIT_ARCH_RISCV == 64 ? kRISCV64 :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_LE ? kARM :
ASMJIT_ARCH_ARM == 32 && ASMJIT_ARCH_BE ? kARM_BE :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_LE ? kAArch64 :
ASMJIT_ARCH_ARM == 64 && ASMJIT_ARCH_BE ? kAArch64_BE :
ASMJIT_ARCH_LA == 64 ? kLA64 :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_LE ? kMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && ASMJIT_ARCH_BE ? kMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_LE ? kMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && ASMJIT_ARCH_BE ? kMIPS64_BE :
ASMJIT_ARCH_ARM == 32 && Support::ByteOrder::kNative == Support::ByteOrder::kLE ? kARM :
ASMJIT_ARCH_ARM == 32 && Support::ByteOrder::kNative == Support::ByteOrder::kBE ? kARM_BE :
ASMJIT_ARCH_ARM == 64 && Support::ByteOrder::kNative == Support::ByteOrder::kLE ? kAArch64 :
ASMJIT_ARCH_ARM == 64 && Support::ByteOrder::kNative == Support::ByteOrder::kBE ? kAArch64_BE :
ASMJIT_ARCH_MIPS == 32 && Support::ByteOrder::kNative == Support::ByteOrder::kLE ? kMIPS32_LE :
ASMJIT_ARCH_MIPS == 32 && Support::ByteOrder::kNative == Support::ByteOrder::kBE ? kMIPS32_BE :
ASMJIT_ARCH_MIPS == 64 && Support::ByteOrder::kNative == Support::ByteOrder::kLE ? kMIPS64_LE :
ASMJIT_ARCH_MIPS == 64 && Support::ByteOrder::kNative == Support::ByteOrder::kBE ? kMIPS64_BE :
kUnknown
#endif
@@ -168,35 +169,35 @@ struct ArchTraits {
//! \{
//! Stack pointer register id.
uint8_t _spRegId;
uint8_t _sp_reg_id;
//! Frame pointer register id.
uint8_t _fpRegId;
uint8_t _fp_reg_id;
//! Link register id.
uint8_t _linkRegId;
uint8_t _link_reg_id;
//! Instruction pointer (or program counter) register id, if accessible.
uint8_t _pcRegId;
uint8_t _pc_reg_id;
// Reserved.
uint8_t _reserved[3];
//! Hardware stack alignment requirement.
uint8_t _hwStackAlignment;
uint8_t _hw_stack_alignment;
//! Minimum addressable offset on stack guaranteed for all instructions.
uint32_t _minStackOffset;
uint32_t _min_stack_offset;
//! Maximum addressable offset on stack depending on specific instruction.
uint32_t _maxStackOffset;
uint32_t _max_stack_offset;
//! Bit-mask indexed by \ref RegType that describes, which register types are supported by the ISA.
uint32_t _supportedRegTypes;
uint32_t _supported_reg_types;
//! Flags for each virtual register group.
Support::Array<InstHints, Globals::kNumVirtGroups> _instHints;
Support::Array<InstHints, Globals::kNumVirtGroups> _inst_hints;
//! Maps scalar TypeId values (from TypeId::_kIdBaseStart) to register types, see \ref TypeId.
Support::Array<RegType, 32> _typeIdToRegType;
Support::Array<RegType, 32> _type_id_to_reg_type;
//! Word name identifiers of 8-bit, 16-bit, 32-bit, and 64-bit quantities that appear in formatted text.
ArchTypeNameId _typeNameIdTable[4];
ArchTypeNameId _type_name_id_table[4];
//! \}
@@ -205,62 +206,62 @@ struct ArchTraits {
//! Returns stack pointer register id (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t spRegId() const noexcept { return _spRegId; }
ASMJIT_INLINE_NODEBUG uint32_t sp_reg_id() const noexcept { return _sp_reg_id; }
//! Returns stack frame register id (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t fpRegId() const noexcept { return _fpRegId; }
ASMJIT_INLINE_NODEBUG uint32_t fp_reg_id() const noexcept { return _fp_reg_id; }
//! Returns link register id, if the architecture provides it (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t linkRegId() const noexcept { return _linkRegId; }
ASMJIT_INLINE_NODEBUG uint32_t link_reg_id() const noexcept { return _link_reg_id; }
//! Returns program counter register id, if the architecture exposes it (always GP register).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t pcRegId() const noexcept { return _pcRegId; }
ASMJIT_INLINE_NODEBUG uint32_t pc_reg_id() const noexcept { return _pc_reg_id; }
//! Returns a hardware stack alignment requirement.
//!
//! \note This is a hardware constraint. Architectures that don't constrain it would return the lowest alignment
//! (1), however, some architectures may constrain the alignment, for example AArch64 requires 16-byte alignment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t hwStackAlignment() const noexcept { return _hwStackAlignment; }
ASMJIT_INLINE_NODEBUG uint32_t hw_stack_alignment() const noexcept { return _hw_stack_alignment; }
//! Tests whether the architecture provides link register, which is used across function calls. If the link
//! register is not provided then a function call pushes the return address on stack (X86/X64).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLinkReg() const noexcept { return _linkRegId != Reg::kIdBad; }
ASMJIT_INLINE_NODEBUG bool has_link_reg() const noexcept { return _link_reg_id != Reg::kIdBad; }
//! Returns minimum addressable offset on stack guaranteed for all instructions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t minStackOffset() const noexcept { return _minStackOffset; }
ASMJIT_INLINE_NODEBUG uint32_t min_stack_offset() const noexcept { return _min_stack_offset; }
//! Returns maximum addressable offset on stack depending on specific instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t maxStackOffset() const noexcept { return _maxStackOffset; }
ASMJIT_INLINE_NODEBUG uint32_t max_stack_offset() const noexcept { return _max_stack_offset; }
//! Returns ISA flags of the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstHints instFeatureHints(RegGroup group) const noexcept { return _instHints[group]; }
ASMJIT_INLINE_NODEBUG InstHints inst_feature_hints(RegGroup group) const noexcept { return _inst_hints[group]; }
//! Tests whether the given register `group` has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstHint(RegGroup group, InstHints feature) const noexcept { return Support::test(_instHints[group], feature); }
ASMJIT_INLINE_NODEBUG bool has_inst_hint(RegGroup group, InstHints feature) const noexcept { return Support::test(_inst_hints[group], feature); }
//! Tests whether the ISA provides register swap instruction for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstRegSwap(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kRegSwap); }
ASMJIT_INLINE_NODEBUG bool has_inst_reg_swap(RegGroup group) const noexcept { return has_inst_hint(group, InstHints::kRegSwap); }
//! Tests whether the ISA provides push/pop instructions for the given register `group`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstPushPop(RegGroup group) const noexcept { return hasInstHint(group, InstHints::kPushPop); }
ASMJIT_INLINE_NODEBUG bool has_inst_push_pop(RegGroup group) const noexcept { return has_inst_hint(group, InstHints::kPushPop); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRegType(RegType type) const noexcept {
ASMJIT_INLINE_NODEBUG bool has_reg_type(RegType type) const noexcept {
if (ASMJIT_UNLIKELY(type > RegType::kMaxValue)) {
type = RegType::kNone;
}
return Support::bitTest(_supportedRegTypes, uint32_t(type));
return Support::bit_test(_supported_reg_types, uint32_t(type));
}
//! Returns a table of ISA word names that appear in formatted text. Word names are ISA dependent.
@@ -271,11 +272,11 @@ struct ArchTraits {
//! - [2] 32-bits
//! - [3] 64-bits
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTypeNameId* typeNameIdTable() const noexcept { return _typeNameIdTable; }
ASMJIT_INLINE_NODEBUG const ArchTypeNameId* type_name_id_table() const noexcept { return _type_name_id_table; }
//! Returns an ISA word name identifier of the given `index`, see \ref typeNameIdTable() for more details.
//! Returns an ISA word name identifier of the given `index`, see \ref type_name_id_table() for more details.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArchTypeNameId typeNameIdByIndex(uint32_t index) const noexcept { return _typeNameIdTable[index]; }
ASMJIT_INLINE_NODEBUG ArchTypeNameId type_name_id_by_index(uint32_t index) const noexcept { return _type_name_id_table[index]; }
//! \}
@@ -284,21 +285,21 @@ struct ArchTraits {
//! Returns a const reference to `ArchTraits` for the given architecture `arch`.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG const ArchTraits& byArch(Arch arch) noexcept;
static ASMJIT_INLINE_NODEBUG const ArchTraits& by_arch(Arch arch) noexcept;
//! \}
};
ASMJIT_VARAPI const ArchTraits _archTraits[uint32_t(Arch::kMaxValue) + 1];
ASMJIT_VARAPI const ArchTraits _arch_traits[uint32_t(Arch::kMaxValue) + 1];
//! \cond
ASMJIT_INLINE_NODEBUG const ArchTraits& ArchTraits::byArch(Arch arch) noexcept { return _archTraits[uint32_t(arch)]; }
ASMJIT_INLINE_NODEBUG const ArchTraits& ArchTraits::by_arch(Arch arch) noexcept { return _arch_traits[uint32_t(arch)]; }
//! \endcond
//! Architecture utilities.
namespace ArchUtils {
ASMJIT_API Error typeIdToRegSignature(Arch arch, TypeId typeId, TypeId* typeIdOut, OperandSignature* regSignatureOut) noexcept;
ASMJIT_API Error type_id_to_reg_signature(Arch arch, TypeId type_id, Out<TypeId> type_id_out, Out<OperandSignature> reg_signature_out) noexcept;
} // {ArchUtils}

src/asmjit/core/arena.cpp

@@ -0,0 +1,515 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/arena.h"
ASMJIT_BEGIN_NAMESPACE
// Arena - Globals
// ===============
// Overhead of block alignment (we want to achieve at least Arena::kAlignment).
static constexpr size_t kArenaAlignmentOverhead =
(Arena::kAlignment <= Globals::kAllocAlignment)
? size_t(0)
: Arena::kAlignment - Globals::kAllocAlignment;
// Zero size block used by `Arena` that doesn't have any memory allocated. Should be allocated in read-only memory,
// which would prevent it from being modified.
static const Arena::ManagedBlock _arena_zero_block {};
static ASMJIT_INLINE Arena::ManagedBlock* Arena_get_zero_block() noexcept {
return const_cast<Arena::ManagedBlock*>(&_arena_zero_block);
}
static ASMJIT_INLINE void* Arena_malloc(size_t size) noexcept {
return ::malloc(size);
}
static ASMJIT_INLINE void Arena_free(void* ptr) noexcept {
::free(ptr);
}
static ASMJIT_INLINE void Arena_assign_block(Arena& arena, Arena::ManagedBlock* block) noexcept {
arena._ptr = Support::align_up(block->data(), Arena::kAlignment);
arena._end = block->end();
arena._current_block = block;
ASMJIT_ASSERT(arena._ptr <= arena._end);
}
// This is only used in debug mode to verify that the Arena is used properly.
[[maybe_unused]]
static bool Arena_has_dynamic_block(Arena& arena, Arena::DynamicBlock* block) noexcept {
Arena::DynamicBlock* current = arena._dynamic_blocks;
while (current) {
if (current == block) {
return true;
}
current = current->next;
}
return false;
}
// Arena - Initialization & Reset
// ==============================
void Arena::_init(size_t min_block_size, Span<uint8_t> static_arena_memory) noexcept {
ASMJIT_ASSERT(min_block_size >= kMinManagedBlockSize);
ASMJIT_ASSERT(min_block_size <= kMaxManagedBlockSize);
ManagedBlock* block = Arena_get_zero_block();
size_t block_size_shift = Support::bit_size_of<size_t> - Support::clz(min_block_size);
_current_block_size_shift = uint8_t(block_size_shift);
_min_block_size_shift = uint8_t(block_size_shift);
_max_block_size_shift = uint8_t(26); // (1 << 26) Equals 64 MiB blocks.
_has_static_block = uint8_t(static_arena_memory.size() != 0u);
_unused_byte_count = 0u;
// Setup the first [temporary] block, if necessary.
if (static_arena_memory.size()) {
block = reinterpret_cast<ManagedBlock*>(static_arena_memory.data());
block->next = nullptr;
ASMJIT_ASSERT(static_arena_memory.size() >= sizeof(ManagedBlock));
block->size = static_arena_memory.size() - sizeof(ManagedBlock);
}
_first_block = block;
Arena_assign_block(*this, block);
}
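The `_current_block_size_shift` bookkeeping above implements a doubling growth policy capped at 64 MiB. A tiny standalone model of the progression (not AsmJit code; the shift values are examples):

```
// Standalone model of the managed-block growth policy: block sizes are
// powers of two, doubling with each newly allocated block, capped at 1 << 26.
#include <cstddef>
#include <cstdio>
#include <algorithm>

int main() {
  unsigned shift = 12;            // Example: Arena(4096) -> min shift 12.
  const unsigned max_shift = 26;  // (1 << 26) equals 64 MiB blocks.

  for (int block = 0; block < 16; block++) {
    std::printf("block %2d -> %zu bytes\n", block, size_t(1) << shift);
    shift = std::min(shift + 1u, max_shift);
  }
  return 0;
}
```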
void Arena::reset(ResetPolicy reset_policy) noexcept {
ManagedBlock* first = _first_block;
if (reset_policy == ResetPolicy::kHard) {
ManagedBlock* current = first;
if (first == &_arena_zero_block) {
return;
}
if (has_static_block()) {
current = current->next;
first->next = nullptr;
}
else {
first = Arena_get_zero_block();
_first_block = first;
}
if (current) {
do {
ManagedBlock* next = current->next;
Arena_free(current);
current = next;
} while (current);
}
_current_block_size_shift = _min_block_size_shift;
}
// Free dynamic blocks.
{
DynamicBlock* current = _dynamic_blocks;
while (current) {
DynamicBlock* next = current->next;
Arena_free(current);
current = next;
}
memset(_reusable_slots, 0, sizeof(_reusable_slots));
_dynamic_blocks = nullptr;
}
Arena_assign_block(*this, first);
_unused_byte_count = 0u;
}
// Arena - Utilities
// =================
static ASMJIT_NOINLINE void Arena_make_block_leftover_reusable(Arena& arena, uint8_t* ptr, size_t size) noexcept {
while (size >= Arena::kMinReusableSlotSize) {
size_t saved_slot {};
size_t saved_size {};
// Pick the slot matching `size / 2` so the slot size never exceeds `size`; if even that exceeds the largest
// reusable slot, fall back to the last slot.
if (!Arena::_get_reusable_slot_index(size / 2u, Out(saved_slot), Out(saved_size))) {
saved_slot = Arena::kReusableSlotCount - 1u;
saved_size = Arena::kMaxReusableSlotSize;
}
reinterpret_cast<Arena::ReusableSlot*>(ptr)->next = arena._reusable_slots[saved_slot];
arena._reusable_slots[saved_slot] = reinterpret_cast<Arena::ReusableSlot*>(ptr);
ptr += saved_size;
size -= saved_size;
}
arena._ptr = ptr;
}
// Arena - Allocation (Oneshot)
// ============================
static ASMJIT_INLINE uint32_t Arena_get_unused_block_byte_count(Arena::ManagedBlock* block, const uint8_t* ptr) noexcept {
return uint32_t(size_t(block->end() - ptr));
}
void* Arena::_alloc_oneshot(size_t size) noexcept {
// Must hold otherwise we would end up with an unaligned pointer in the Arena.
ASMJIT_ASSERT(Support::is_aligned(size, Arena::kAlignment));
// Total overhead per block allocated with malloc - we want to decrease the size of each block by this value
// to make sure that malloc doesn't mmap() an additional page just to hold its metadata.
constexpr size_t kBlockSizeOverhead = sizeof(ManagedBlock) + Globals::kAllocOverhead + kArenaAlignmentOverhead;
ManagedBlock* cur_block = _current_block;
ManagedBlock* next = cur_block->next;
uint32_t unused_byte_count = Arena_get_unused_block_byte_count(cur_block, _ptr);
// If the `Arena` has been soft-reset the current block doesn't have to be the last one. Check if there is a block
// that can be used instead of allocating a new one. If there is a `next` block it's completely unused, we don't
// have to check for remaining bytes in that case.
while (next) {
uint8_t* ptr = Support::align_up(next->data(), Arena::kAlignment);
uint8_t* end = next->end();
if (size <= (size_t)(end - ptr)) {
_current_block = next;
_ptr = ptr + size;
_end = end;
_unused_byte_count += unused_byte_count;
ASMJIT_ASSERT(_ptr <= _end);
return static_cast<void*>(ptr);
}
ManagedBlock* block_to_free = next;
// Unlink the too-small block before freeing it so the chain never points to freed memory.
next = next->next;
cur_block->next = next;
Arena_free(block_to_free);
}
// Calculates the initial size of the next block - in most cases this would be enough for the allocation. In
// general we want to gradually increase the block size as more and more blocks are allocated, until the maximum
// block size is reached. Since we use shifts (aka log2(size) sizes) we just need the block count and the
// minimum/maximum block size shift to calculate the final size.
uint32_t block_size_shift = uint32_t(_current_block_size_shift);
size_t block_size = size_t(1) << block_size_shift;
// Allocate a new block. We have to accommodate all possible overheads so after the memory is allocated and
// then properly aligned there will be size for the requested memory. In 99.9999% cases this is never a problem,
// but we must be sure that even rare border cases would allocate properly.
if (ASMJIT_UNLIKELY(size > block_size - kBlockSizeOverhead)) {
// If the requested size is larger than a default calculated block size -> increase block size so the
// allocation would be enough to fit the requested size.
if (ASMJIT_UNLIKELY(size > SIZE_MAX - kBlockSizeOverhead)) {
// This would probably never happen in practice - however, it needs to be done to stop malicious cases like
// `alloc(SIZE_MAX)`.
return nullptr;
}
block_size = size + kArenaAlignmentOverhead + sizeof(ManagedBlock);
}
else {
block_size -= Globals::kAllocOverhead;
}
// Allocate new block.
ManagedBlock* new_block = static_cast<ManagedBlock*>(Arena_malloc(block_size));
if (ASMJIT_UNLIKELY(!new_block)) {
return nullptr;
}
// If this doesn't hold, the whole code is broken as we would use more space than allocated due to the call to align_up().
ASMJIT_ASSERT(Support::is_aligned(new_block, Globals::kAllocAlignment));
// block_size includes the struct size, which must be accounted when assigning size to a newly allocated block.
size_t real_block_size = block_size - sizeof(ManagedBlock);
new_block->next = next;
new_block->size = real_block_size;
if (cur_block == &_arena_zero_block) {
_first_block = new_block;
}
else {
cur_block->next = new_block;
}
uint8_t* ptr = Support::align_up(new_block->data(), Arena::kAlignment);
uint8_t* end = new_block->data() + real_block_size;
_ptr = ptr + size;
_end = end;
_current_block = new_block;
_current_block_size_shift = uint8_t(Support::min<uint32_t>(uint32_t(block_size_shift) + 1u, _max_block_size_shift));
_unused_byte_count += unused_byte_count;
ASMJIT_ASSERT(_ptr <= _end);
return static_cast<void*>(ptr);
}
void* Arena::_alloc_oneshot_zeroed(size_t size) noexcept {
ASMJIT_ASSERT(Support::is_aligned(size, Arena::kAlignment));
void* p = alloc_oneshot(size);
if (ASMJIT_UNLIKELY(!p)) {
return p;
}
return memset(p, 0, size);
}
void* Arena::dup(const void* data, size_t size, bool null_terminate) noexcept {
if (ASMJIT_UNLIKELY(!data || !size)) {
return nullptr;
}
ASMJIT_ASSERT(size != SIZE_MAX);
size_t alloc_size = Support::align_up(size + size_t(null_terminate), Arena::kAlignment);
uint8_t* m = alloc_oneshot<uint8_t>(alloc_size);
if (ASMJIT_UNLIKELY(!m)) {
return nullptr;
}
// Clear the last 8 bytes, which clears potential padding and null terminates at the same time.
static_assert(Arena::kAlignment == 8u, "the code below must be fixed if arena alignment has changed");
Support::storeu<uint64_t>(m + alloc_size - sizeof(uint64_t), 0u);
memcpy(m, data, size);
return static_cast<void*>(m);
}
char* Arena::sformat(const char* fmt, ...) noexcept {
if (ASMJIT_UNLIKELY(!fmt)) {
return nullptr;
}
char buf[512];
size_t size;
va_list ap;
va_start(ap, fmt);
size = unsigned(vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf) - 1, fmt, ap));
va_end(ap);
// vsnprintf() returns the untruncated length - clamp it so the terminator store below stays in bounds
// even if the output was truncated.
size = Support::min<size_t>(size, ASMJIT_ARRAY_SIZE(buf) - 1);
buf[size++] = 0;
return static_cast<char*>(dup(buf, size));
}
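A short usage sketch of `dup()` and `sformat()` as implemented above (names from this patch; error handling elided):

```
// Usage sketch: duplicating plain and formatted strings into the arena.
#include <asmjit/core.h>
#include <cstring>
using namespace asmjit;

static void dup_example() {
  Arena arena(4096);

  const char msg[] = "hello";
  // null_terminate = true appends a terminator; the 8-byte tail store in
  // dup() clears the alignment padding at the same time.
  char* copy = static_cast<char*>(arena.dup(msg, std::strlen(msg), true));

  // Formats into a stack buffer, then dup()s the result into the arena.
  char* formatted = arena.sformat("%s #%d", copy, 42);
  (void)formatted;
} // ~Arena() releases both copies.
```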
// Arena - Allocation (Reusable)
// =============================
void* Arena::_alloc_reusable(size_t size, Out<size_t> allocated_size) noexcept {
// Use the memory pool only if the requested block has a reasonable size.
size_t slot;
if (_get_reusable_slot_index(size, Out(slot), allocated_size)) {
// Slot reuse.
uint8_t* p = reinterpret_cast<uint8_t*>(_reusable_slots[slot]);
size = *allocated_size;
if (p) {
_reusable_slots[slot] = reinterpret_cast<ReusableSlot*>(p)->next;
return p;
}
p = _ptr;
size_t remaining_size = (size_t)(_end - p);
if (ASMJIT_LIKELY(remaining_size >= size)) {
_ptr = p + size;
return p;
}
// Distribute the remaining memory to reusable slots, if possible.
Arena_make_block_leftover_reusable(*this, p, remaining_size);
p = static_cast<uint8_t*>(_alloc_oneshot(size));
if (ASMJIT_UNLIKELY(!p)) {
allocated_size = 0;
return nullptr;
}
return p;
}
else {
// Allocate a dynamic block.
size_t dynamic_block_overhead = Support::align_up(sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kArenaAlignmentOverhead, kAlignment);
// Handle a possible overflow.
if (ASMJIT_UNLIKELY(size >= SIZE_MAX - dynamic_block_overhead)) {
return nullptr;
}
void* p = Arena_malloc(size + dynamic_block_overhead);
if (ASMJIT_UNLIKELY(!p)) {
allocated_size = 0;
return nullptr;
}
// If this doesn't hold, the whole code is broken as we would use more space than allocated due to the call to align_up().
ASMJIT_ASSERT(Support::is_aligned(p, Globals::kAllocAlignment));
// Link as first in `_dynamic_blocks` double-linked list.
DynamicBlock* dynamic_block = static_cast<DynamicBlock*>(p);
DynamicBlock* next = _dynamic_blocks;
if (next) {
next->prev = dynamic_block;
}
dynamic_block->prev = nullptr;
dynamic_block->next = next;
_dynamic_blocks = dynamic_block;
// Align the pointer to the guaranteed alignment and store `DynamicBlock`
// at the beginning of the memory block, so `_release_dynamic()` can find it.
p = Support::align_up(static_cast<uint8_t*>(p) + sizeof(DynamicBlock) + sizeof(DynamicBlock*), kAlignment);
reinterpret_cast<DynamicBlock**>(p)[-1] = dynamic_block;
allocated_size = size;
return p;
}
}
void* Arena::_alloc_reusable_zeroed(size_t size, Out<size_t> allocated_size) noexcept {
void* p = _alloc_reusable(size, allocated_size);
if (ASMJIT_UNLIKELY(!p)) {
return p;
}
return memset(p, 0, *allocated_size);
}
void Arena::_release_dynamic(void* p, size_t size) noexcept {
Support::maybe_unused(size);
// Pointer to `DynamicBlock` is stored at [-1].
DynamicBlock* dynamic_block = reinterpret_cast<DynamicBlock**>(p)[-1];
ASMJIT_ASSERT(Arena_has_dynamic_block(*this, dynamic_block));
// Unlink and free.
DynamicBlock* prev = dynamic_block->prev;
DynamicBlock* next = dynamic_block->next;
if (prev) {
prev->next = next;
}
else {
_dynamic_blocks = next;
}
if (next) {
next->prev = prev;
}
Arena_free(dynamic_block);
}
// Arena - Statistics
// ==================
ArenaStatistics Arena::statistics() const noexcept {
const ManagedBlock* block = _first_block;
size_t block_count = 0u;
size_t used_size = 0u;
size_t reserved_size = 0u;
while (block) {
if (_ptr >= block->data() && _ptr <= block->end()) {
size_t offset = size_t(_ptr - block->data());
used_size = reserved_size + offset;
}
block_count++;
reserved_size += block->size;
block = block->next;
}
ArenaStatistics stats {};
stats._block_count = block_count;
stats._used_size = used_size;
stats._reserved_size = reserved_size;
stats._overhead_size = _unused_byte_count;
return stats;
}
// Arena - Tests
// =============
#if defined(ASMJIT_TEST)
UNIT(arena_oneshot) {
struct SomeData {
size_t _x;
size_t _y;
inline SomeData(size_t x, size_t y) noexcept
: _x(x), _y(y) {}
};
constexpr size_t kN = 100000u;
{
Arena arena(1024u * 4u);
for (size_t r = 0; r < 3u; r++) {
for (size_t i = 0; i < kN; i++) {
uint8_t* p = arena.alloc_oneshot<uint8_t>(32);
EXPECT_NOT_NULL(p);
}
ArenaStatistics stats = arena.statistics();
EXPECT_GE(stats.block_count(), 2u);
EXPECT_GE(stats.used_size(), kN * 32u);
EXPECT_GE(stats.reserved_size(), kN * 32u);
EXPECT_GE(stats.reserved_size(), stats.used_size());
arena.reset(r == 0 ? ResetPolicy::kSoft : ResetPolicy::kHard);
}
}
{
Arena arena(1024u * 4u);
for (size_t r = 0; r < 3u; r++) {
for (size_t i = 0; i < kN; i++) {
SomeData* p = arena.new_oneshot<SomeData>(r, i);
EXPECT_NOT_NULL(p);
}
arena.reset(r == 0 ? ResetPolicy::kSoft : ResetPolicy::kHard);
}
}
}
UNIT(arena_reusable_slots_check) {
constexpr size_t kMinReusableSlotSize = Arena::kMinReusableSlotSize;
constexpr size_t kMaxReusableSlotSize = Arena::kMaxReusableSlotSize;
size_t expected_slot = 0;
size_t expected_until = kMinReusableSlotSize;
for (size_t size = 1; size <= kMaxReusableSlotSize; size++) {
size_t acquired_slot;
EXPECT_TRUE(Arena::_get_reusable_slot_index(size, Out(acquired_slot)));
EXPECT_EQ(acquired_slot, expected_slot);
EXPECT_LT(acquired_slot, Arena::kReusableSlotCount);
if (size == expected_until) {
expected_slot++;
expected_until *= 2;
}
}
}
#endif // ASMJIT_TEST
ASMJIT_END_NAMESPACE

src/asmjit/core/arena.h

@@ -0,0 +1,498 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARENA_H_INCLUDED
#define ASMJIT_CORE_ARENA_H_INCLUDED
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_support
//! \{
//! Arena allocation statistics.
struct ArenaStatistics {
//! \name Members
//! \{
//! Number of blocks maintained.
//!
//! A block is a bigger chunk of memory that is used by \ref Arena.
size_t _block_count;
//! Number of bytes allocated and in use.
size_t _used_size;
//! Number of bytes reserved.
size_t _reserved_size;
//! Number of bytes lost to overhead (allocated, but not usable by allocations).
size_t _overhead_size;
//! Number of bytes pooled by \ref Arena reusable pools and eventually \ref ArenaPool if aggregated.
size_t _pooled_size;
//! \}
//! \name Accessors
//! \{
//! Returns the number of blocks maintained by \ref Arena (or multiple Arenas if aggregated).
ASMJIT_INLINE_NODEBUG size_t block_count() const noexcept { return _block_count; }
//! Returns the number of bytes used by \ref Arena (or multiple Arenas if aggregated).
//!
//! Used bytes represent the number of bytes successfully allocated by \ref Arena regardless of how these bytes
//! are actually used. For example if \ref Arena is used with \ref ArenaPool, the number of used bytes pooled
//! by \ref ArenaPool is included in used_size, because it was already returned by \ref Arena.
ASMJIT_INLINE_NODEBUG size_t used_size() const noexcept { return _used_size; }
//! Returns the number of bytes reserved by \ref Arena (or multiple Arenas if aggregated).
ASMJIT_INLINE_NODEBUG size_t reserved_size() const noexcept { return _reserved_size; }
//! Returns the number of bytes that were allocated, but couldn't be used by allocations because of size
//! requests, alignment, or other reasons. The overhead should be relatively small with \ref Arena, but still
//! can be used to find pathological cases if they happen for some reason.
ASMJIT_INLINE_NODEBUG size_t overhead_size() const noexcept { return _overhead_size; }
//! Returns the number of bytes pooled by \ref Arena reusable pools and eventually \ref ArenaPool if aggregated.
ASMJIT_INLINE_NODEBUG size_t pooled_size() const noexcept { return _pooled_size; }
//! \}
//! \name Aggregation
//! \{
ASMJIT_INLINE void aggregate(const ArenaStatistics& other) noexcept {
_block_count += other._block_count;
_used_size += other._used_size;
_reserved_size += other._reserved_size;
_overhead_size += other._overhead_size;
_pooled_size += other._pooled_size;
}
ASMJIT_INLINE ArenaStatistics& operator+=(const ArenaStatistics& other) noexcept {
aggregate(other);
return *this;
}
//! \}
};
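A brief sketch of the aggregation helpers in use, combining statistics of two arenas into a single report (the `print_combined_statistics()` helper is hypothetical):

```
// Sketch: aggregating statistics of two arenas via aggregate()/operator+=.
#include <asmjit/core.h>
#include <cstdio>
using namespace asmjit;

static void print_combined_statistics(const Arena& a, const Arena& b) {
  ArenaStatistics total {};
  total += a.statistics();
  total += b.statistics();

  std::printf("blocks=%zu used=%zu reserved=%zu overhead=%zu\n",
              total.block_count(), total.used_size(),
              total.reserved_size(), total.overhead_size());
}
```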
//! Arena allocator is an incremental memory allocator that allocates memory by simply incrementing a pointer. It
//! allocates blocks of memory by using C's `malloc()`, but divides these blocks into smaller segments requested by
//! calling `Arena::alloc()` and friends.
class Arena {
public:
ASMJIT_NONCOPYABLE(Arena)
//! Default alignment of allocation requests to use when using Arena.
static inline constexpr size_t kAlignment = 8u;
//! \cond INTERNAL
//! Minimum managed block size.
static inline constexpr size_t kMinManagedBlockSize = 1024;
//! Maximum managed block size.
static inline constexpr size_t kMaxManagedBlockSize = size_t(1) << (sizeof(size_t) * 8 - 2);
//! Number of slots.
static inline constexpr size_t kReusableSlotCount = 8;
//! How many bytes are in the first slot.
static inline constexpr size_t kMinReusableSlotSize = 16;
//! How many bytes are in the last slot.
static inline constexpr size_t kMaxReusableSlotSize = kMinReusableSlotSize << (kReusableSlotCount - 1u);
//! A single block of memory managed by `Arena`.
struct alignas(kAlignment) ManagedBlock {
//! Link to the next managed block (single-linked list).
ManagedBlock* next;
//! Size represents the number of bytes that can be allocated (it doesn't include block overhead).
size_t size;
ASMJIT_INLINE_NODEBUG uint8_t* data() const noexcept {
return const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(this) + sizeof(*this));
}
ASMJIT_INLINE_NODEBUG uint8_t* end() const noexcept {
return data() + size;
}
};
//! Single-linked list used to store unused reusable chunks.
struct ReusableSlot {
//! Link to a next slot in a single-linked list.
ReusableSlot* next;
};
//! A large block of memory that has been allocated dynamically and is not part of managed blocks used by the
//! allocator. Arena keeps track of these blocks and always releases them when the Arena is destroyed or reset.
struct DynamicBlock {
DynamicBlock* prev;
DynamicBlock* next;
};
//! Returns the slot index to be used for the given `size`. Returns `true` if a valid slot has been written to `slot`.
[[nodiscard]]
static ASMJIT_INLINE bool _get_reusable_slot_index(size_t size, Out<size_t> slot) noexcept {
slot = Support::bit_size_of<size_t> - 4u - Support::clz((size - 1u) | 0xF);
return *slot < kReusableSlotCount;
}
//! Returns the slot index to be used for the given `size`. Returns `true` if a valid slot has been written to `slot`
//! and `allocated_size` has been filled with slot exact size (`allocated_size` can be equal or slightly greater than
//! `size`).
[[nodiscard]]
static ASMJIT_INLINE bool _get_reusable_slot_index(size_t size, Out<size_t> slot, Out<size_t> allocated_size) noexcept {
slot = Support::bit_size_of<size_t> - 4u - Support::clz((size - 1u) | 0xF);
allocated_size = kMinReusableSlotSize << *slot;
return *slot < kReusableSlotCount;
}
//! \endcond
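The bucketing above rounds sizes up to powers of two between 16 bytes (slot 0) and 2048 bytes (slot 7). A small sketch verifying the mapping (it calls the internal `_get_reusable_slot_index()` directly, so it's a test-style illustration only):

```
// Sketch of the reusable-slot bucketing: sizes round up to powers of two
// between 16 (slot 0) and 2048 (slot 7) bytes.
#include <asmjit/core.h>
#include <cassert>
using namespace asmjit;

static void slot_mapping_example() {
  size_t slot, allocated;

  assert(Arena::_get_reusable_slot_index(1, Out(slot), Out(allocated)));
  assert(slot == 0 && allocated == 16);

  assert(Arena::_get_reusable_slot_index(17, Out(slot), Out(allocated)));
  assert(slot == 1 && allocated == 32);

  assert(Arena::_get_reusable_slot_index(2048, Out(slot), Out(allocated)));
  assert(slot == 7 && allocated == 2048);

  // Anything above the last slot is not pooled - it goes to a dynamic block.
  assert(!Arena::_get_reusable_slot_index(2049, Out(slot), Out(allocated)));
}
```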
template<typename T>
static ASMJIT_INLINE_CONSTEXPR size_t aligned_size_of() noexcept { return Support::align_up(sizeof(T), kAlignment); }
static ASMJIT_INLINE_CONSTEXPR size_t aligned_size(size_t size) noexcept { return Support::align_up(size, kAlignment); }
//! \name Members
//! \{
//! Pointer in the current block.
uint8_t* _ptr {};
//! End of the current block.
uint8_t* _end {};
//! Current block.
ManagedBlock* _current_block {};
//! First block (single-linked list).
ManagedBlock* _first_block {};
//! Current block size shift - reverted to _min_block_size_shift every time the Arena is `reset(ResetPolicy::kHard)`.
uint8_t _current_block_size_shift {};
//! Minimum log2(block_size) to allocate.
uint8_t _min_block_size_shift {};
//! Maximum log2(block_size) to allocate.
uint8_t _max_block_size_shift {};
//! True when the Arena has a static block (static blocks are used by ArenaTmp).
uint8_t _has_static_block {};
//! Unused bytes (remaining bytes in blocks that couldn't be used because of size requests).
uint32_t _unused_byte_count {};
//! Slots that contain reusable memory chunks.
ReusableSlot* _reusable_slots[kReusableSlotCount] {};
//! Large blocks for allocations that either couldn't use slots or one-shot allocation.
DynamicBlock* _dynamic_blocks {};
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new Arena.
//!
//! The `min_block_size` parameter describes the default size of a managed block. If the `size` parameter passed to
//! `alloc_oneshot()` is greater than the default size, `Arena` will allocate and use a larger block, but it will not
//! change the default `min_block_size`.
//!
//! It's not required, but it's good practice to set `min_block_size` to a reasonable value that depends on the
//! usage of `Arena`. Greater block sizes are generally safer and perform better than unreasonably low block sizes.
ASMJIT_INLINE_NODEBUG explicit Arena(size_t min_block_size) noexcept {
_init(min_block_size, Span<uint8_t>{});
}
//! Creates a new Arena with a first block pointing to `static_arena_memory`.
ASMJIT_INLINE_NODEBUG Arena(size_t min_block_size, Span<uint8_t> static_arena_memory) noexcept {
_init(min_block_size, static_arena_memory);
}
//! Destroys the `Arena` instance.
//!
//! This will destroy the `Arena` instance and release all blocks of memory allocated by it. It performs implicit
//! `reset(ResetPolicy::kHard)`.
ASMJIT_INLINE_NODEBUG ~Arena() noexcept { reset(ResetPolicy::kHard); }
ASMJIT_API void _init(size_t min_block_size, Span<uint8_t> static_arena_memory) noexcept;
//! Resets the `Arena` invalidating all blocks allocated.
//!
//! See `ResetPolicy` for more details.
ASMJIT_API void reset(ResetPolicy reset_policy = ResetPolicy::kSoft) noexcept;
//! \}
//! \name Accessors
//! \{
//! Returns a minimum block size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t min_block_size() const noexcept { return size_t(1) << _min_block_size_shift; }
//! Returns a maximum block size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t max_block_size() const noexcept { return size_t(1) << _max_block_size_shift; }
//! Tests whether this `Arena` is actually a `ArenaTmp` that uses temporary memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t has_static_block() const noexcept { return _has_static_block; }
//! Returns remaining size of the current block.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t remaining_size() const noexcept { return (size_t)(_end - _ptr); }
//! Returns the current arena cursor (dangerous).
//!
//! This is a function that can be used to get exclusive access to the current block's memory buffer.
template<typename T = uint8_t>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* ptr() noexcept { return reinterpret_cast<T*>(_ptr); }
//! Returns the end of the current arena block, only useful if you use `ptr()`.
template<typename T = uint8_t>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* end() noexcept { return reinterpret_cast<T*>(_end); }
//! Sets the current arena pointer to `ptr` (must be within the current block).
template<typename T>
ASMJIT_INLINE void set_ptr(T* ptr) noexcept {
uint8_t* p = reinterpret_cast<uint8_t*>(ptr);
ASMJIT_ASSERT(p >= _ptr && p <= _end);
_ptr = p;
}
//! Sets the end arena pointer to `end` (must be within the current block).
template<typename T>
ASMJIT_INLINE void set_end(T* end) noexcept {
uint8_t* p = reinterpret_cast<uint8_t*>(end);
ASMJIT_ASSERT(p >= _ptr && p <= _end);
_end = p;
}
//! \}
//! \name Oneshot Allocation
//! \{
//! \cond INTERNAL
//! Internal alloc function used by inline wrappers.
[[nodiscard]]
ASMJIT_API void* _alloc_oneshot(size_t size) noexcept;
//! \endcond
//! Allocates the requested memory specified by `size` and optionally casts the returned value to `T*`.
//!
//! The returned pointer is valid until the `Arena` instance is destroyed or reset by calling `reset()`. If you plan
//! to construct a C++ object at the returned address, use placement `new` and an explicit destructor call:
//!
//! ```
//! using namespace asmjit;
//!
//! class Object { ... };
//!
//! // Create Arena with default block size of 65536 bytes (the maximum size per alloc() would be slightly less).
//! Arena arena(65536);
//!
//! // Create your objects using arena object allocating, for example:
//! Object* obj = static_cast<Object*>(arena.alloc_oneshot(Arena::aligned_size_of<Object>()));
//!
//! if (!obj) {
//! // Handle out of memory error.
//! }
//!
//! // Placement `new` and `delete` operators can be used to instantiate it.
//! new(obj) Object();
//!
//! // ... lifetime of your objects ...
//!
//! // To destroy the instance (if required).
//! obj->~Object();
//!
//! // Reset or destroy `Arena`.
//! arena.reset();
//! ```
template<typename T = void>
[[nodiscard]]
ASMJIT_INLINE T* alloc_oneshot(size_t size) noexcept {
ASMJIT_ASSERT(Support::is_aligned(size, kAlignment));
#if defined(__GNUC__)
// We can optimize this function a little bit if we know that `size` is relatively small - which would mean
// that we cannot possibly overflow `_ptr`. Since most of the time `alloc()` is used for known types (which
// implies their size is known as well) this optimization is worth it as it may save us 1 or 2 instructions.
if (__builtin_constant_p(size) && size <= 1024u) {
uint8_t* after = _ptr + size;
if (ASMJIT_UNLIKELY(after > _end)) {
return static_cast<T*>(_alloc_oneshot(size));
}
void* p = static_cast<void*>(_ptr);
_ptr = after;
return static_cast<T*>(p);
}
#endif
if (ASMJIT_UNLIKELY(size > remaining_size())) {
return static_cast<T*>(_alloc_oneshot(size));
}
void* p = static_cast<void*>(_ptr);
_ptr += size;
return static_cast<T*>(p);
}
template<typename T>
[[nodiscard]]
ASMJIT_INLINE T* alloc_oneshot() noexcept {
return alloc_oneshot<T>(aligned_size_of<T>());
}
//! Allocates `size` bytes of zeroed memory. See `alloc()` for more details.
[[nodiscard]]
ASMJIT_API void* _alloc_oneshot_zeroed(size_t size) noexcept;
//! Allocates `size` bytes of zeroed memory. See `alloc()` for more details.
template<typename T = void>
[[nodiscard]]
ASMJIT_INLINE T* alloc_oneshot_zeroed(size_t size) noexcept {
return static_cast<T*>(_alloc_oneshot_zeroed(size));
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Arena`.
template<typename T>
[[nodiscard]]
ASMJIT_INLINE T* new_oneshot() noexcept {
void* p = alloc_oneshot(aligned_size_of<T>());
if (ASMJIT_UNLIKELY(!p)) {
return nullptr;
}
return new(Support::PlacementNew{p}) T();
}
//! Like `new(std::nothrow) T(...)`, but allocated by `Arena`.
template<typename T, typename... Args>
[[nodiscard]]
ASMJIT_INLINE T* new_oneshot(Args&&... args) noexcept {
void* p = alloc_oneshot(aligned_size_of<T>());
if (ASMJIT_UNLIKELY(!p)) {
return nullptr;
}
return new(Support::PlacementNew{p}) T(std::forward<Args>(args)...);
}
//! Helper to duplicate data.
[[nodiscard]]
ASMJIT_API void* dup(const void* data, size_t size, bool null_terminate = false) noexcept;
//! Helper to duplicate a formatted string; the formatted output is limited by an internal 512-byte buffer.
[[nodiscard]]
ASMJIT_API char* sformat(const char* str, ...) noexcept;
//! \}
//! \name Reusable Allocation
//! \{
//! \cond INTERNAL
[[nodiscard]]
ASMJIT_API void* _alloc_reusable(size_t size, Out<size_t> allocated_size) noexcept;
[[nodiscard]]
ASMJIT_API void* _alloc_reusable_zeroed(size_t size, Out<size_t> allocated_size) noexcept;
ASMJIT_API void _release_dynamic(void* p, size_t size) noexcept;
//! \endcond
//! Allocates `size` bytes of memory, ideally from an available pool.
//!
//! \note `size` can't be zero, it will assert in debug mode in such case.
template<typename T = void>
[[nodiscard]]
inline T* alloc_reusable(size_t size) noexcept {
size_t dummy_allocated_size;
return static_cast<T*>(_alloc_reusable(size, Out(dummy_allocated_size)));
}
//! Like `alloc_reusable(size)`, but provides a second argument `allocated_size` that reports how big the
//! returned block actually is. This is useful for containers to prevent growing too early.
template<typename T = void>
[[nodiscard]]
inline T* alloc_reusable(size_t size, Out<size_t> allocated_size) noexcept {
return static_cast<T*>(_alloc_reusable(size, allocated_size));
}
//! Like `alloc_reusable(size)`, but returns zeroed memory.
template<typename T = void>
[[nodiscard]]
inline T* alloc_reusable_zeroed(size_t size) noexcept {
size_t dummy_allocated_size;
return static_cast<T*>(_alloc_reusable_zeroed(size, Out(dummy_allocated_size)));
}
//! Like `alloc_reusable(size, allocated_size)`, but returns zeroed memory.
template<typename T = void>
[[nodiscard]]
inline T* alloc_reusable_zeroed(size_t size, Out<size_t> allocated_size) noexcept {
return static_cast<T*>(_alloc_reusable_zeroed(size, allocated_size));
}
//! Releases memory previously allocated by `alloc_reusable()`. The `size` argument has to be either the same
//! `size` as passed to `alloc_reusable()` or the `allocated_size` it returned.
inline void free_reusable(void* p, size_t size) noexcept {
ASMJIT_ASSERT(p != nullptr);
ASMJIT_ASSERT(size != 0);
size_t slot;
if (_get_reusable_slot_index(size, Out(slot))) {
static_cast<ReusableSlot*>(p)->next = static_cast<ReusableSlot*>(_reusable_slots[slot]);
_reusable_slots[slot] = static_cast<ReusableSlot*>(p);
}
else {
_release_dynamic(p, size);
}
}
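A usage sketch of the reusable-allocation protocol described above: the caller keeps either the requested `size` or the reported `allocated_size` and hands it back to `free_reusable()`:

```
// Usage sketch of the reusable allocation protocol (names from this patch).
#include <asmjit/core.h>
using namespace asmjit;

static void reusable_example() {
  Arena arena(4096);

  size_t allocated_size;
  void* p = arena.alloc_reusable(100, Out(allocated_size));
  if (!p) {
    return; // Out of memory.
  }

  // Up to `allocated_size` bytes are usable (128 here - 100 rounds up to slot 3).

  // Hand the chunk back so future alloc_reusable() calls can recycle it.
  arena.free_reusable(p, allocated_size);
}
```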
//! \}
//! \name Statistics
//! \{
//! Calculates and returns statistics related to the current use of this \ref Arena.
//!
//! \note This function fills all members except `_pooled_size` (see \ref ArenaStatistics::pooled_size()), which
//! is set to zero as \ref Arena has no knowledge of how the requested memory is actually used.
//!
//! \attention This function could be relatively expensive depending on the number of blocks managed by the
//! allocator. Its primary use case is during development, to get an idea about the use of \ref Arena (or the
//! use of multiple Arenas if the statistics are aggregated).
ASMJIT_API ArenaStatistics statistics() const noexcept;
//! \}
};
//! \ref Arena with `N` bytes of a static storage, used for the initial block.
//!
//! Temporary arenas are used in cases where it's known that some memory will be required, but in many cases it won't
//! exceed N bytes, so the whole operation can be performed without a dynamic memory allocation.
template<size_t N>
class ArenaTmp : public Arena {
public:
ASMJIT_NONCOPYABLE(ArenaTmp)
//! Temporary storage, embedded after \ref Arena.
struct alignas(Arena::kAlignment) Storage {
uint8_t data[N];
} _storage;
//! Creates a temporary arena. Dynamic block size is specified by `min_block_size`.
ASMJIT_INLINE explicit ArenaTmp(size_t min_block_size) noexcept
: Arena(min_block_size, Span<uint8_t>(_storage.data, N)) {}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARENA_H_INCLUDED
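A short sketch of `ArenaTmp` in use: as long as allocations fit the embedded storage (minus the block header), no dynamic memory is touched:

```
// Usage sketch: a temporary arena with 512 bytes of inline storage.
#include <asmjit/core.h>
using namespace asmjit;

static void tmp_arena_example() {
  ArenaTmp<512> tmp(4096); // 4096 is the block size used if the 512 bytes run out.

  uint32_t* values = tmp.alloc_oneshot<uint32_t>(Arena::aligned_size(64 * sizeof(uint32_t)));
  if (values) {
    for (uint32_t i = 0; i < 64; i++) {
      values[i] = i;
    }
  }
} // ~Arena() performs an implicit hard reset here.
```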


@@ -0,0 +1,253 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/arena.h"
#include "../core/arenabitset_p.h"
ASMJIT_BEGIN_NAMESPACE
// ArenaBitSet - Operations
// ========================
Error ArenaBitSet::copy_from(Arena& arena, const ArenaBitSet& other) noexcept {
BitWord* data = _data;
size_t new_size = other.size();
if (!new_size) {
_size = 0;
return Error::kOk;
}
if (new_size > _capacity) {
// Realloc needed... Calculate the minimum capacity (in bytes) required.
size_t minimum_capacity_in_bits = Support::align_up<size_t>(new_size, Support::bit_size_of<BitWord>);
if (ASMJIT_UNLIKELY(minimum_capacity_in_bits < new_size)) {
return make_error(Error::kOutOfMemory);
}
// Normalize to bytes.
size_t minimum_capacity = minimum_capacity_in_bits / 8u;
size_t allocated_capacity;
BitWord* new_data = static_cast<BitWord*>(arena.alloc_reusable(minimum_capacity, Out(allocated_capacity)));
if (ASMJIT_UNLIKELY(!new_data)) {
return make_error(Error::kOutOfMemory);
}
// `allocated_capacity` now contains number in bytes, we need bits.
size_t allocated_capacity_in_bits = allocated_capacity * 8;
// Arithmetic overflow should normally not happen. If it happens we just
// change `allocated_capacity_in_bits` to `minimum_capacity_in_bits` as
// this value is still safe to pass to `arena.free_reusable(...)`.
if (ASMJIT_UNLIKELY(allocated_capacity_in_bits < allocated_capacity)) {
allocated_capacity_in_bits = minimum_capacity_in_bits;
}
if (data) {
arena.free_reusable(data, _capacity / 8);
}
data = new_data;
_data = data;
_capacity = uint32_t(allocated_capacity_in_bits);
}
_size = uint32_t(new_size);
_copy_bits(data, other.data(), _words_per_bits(uint32_t(new_size)));
return Error::kOk;
}
Error ArenaBitSet::_resize(Arena& arena, size_t new_size, size_t ideal_capacity, bool new_bits_value) noexcept {
ASMJIT_ASSERT(ideal_capacity >= new_size);
if (new_size <= _size) {
// The size after the resize is lesser than or equal to the current size.
size_t idx = new_size / Support::bit_size_of<BitWord>;
size_t bit = new_size % Support::bit_size_of<BitWord>;
// Just set all bits outside of the new size in the last word to zero. There may be no bits to clear when
// `bit` is zero, which happens when `new_size` is a multiple of `bit_size_of<BitWord>` (64, 128, and so on).
// In that case don't change anything, as that would mean clearing bits outside of `_size`.
if (bit) {
_data[idx] &= (BitWord(1) << bit) - 1u;
}
_size = uint32_t(new_size);
return Error::kOk;
}
size_t old_size = _size;
BitWord* data = _data;
if (new_size > _capacity) {
// Realloc needed, calculate the minimum capacity (in bytes) required.
size_t minimum_capacity_in_bits = Support::align_up(ideal_capacity, Support::bit_size_of<BitWord>);
if (ASMJIT_UNLIKELY(minimum_capacity_in_bits < new_size)) {
return make_error(Error::kOutOfMemory);
}
// Normalize to bytes.
size_t minimum_capacity = minimum_capacity_in_bits / 8u;
size_t allocated_capacity;
BitWord* new_data = static_cast<BitWord*>(arena.alloc_reusable(minimum_capacity, Out(allocated_capacity)));
if (ASMJIT_UNLIKELY(!new_data)) {
return make_error(Error::kOutOfMemory);
}
// `allocated_capacity` now contains number in bytes, we need bits.
size_t allocated_capacity_in_bits = allocated_capacity * 8u;
// Arithmetic overflow should normally not happen. If it happens we just change `allocated_capacity_in_bits`
// to `minimum_capacity_in_bits` as this value is still safe to pass to `arena.free_reusable(...)`.
if (ASMJIT_UNLIKELY(allocated_capacity_in_bits < allocated_capacity)) {
allocated_capacity_in_bits = minimum_capacity_in_bits;
}
_copy_bits(new_data, data, _words_per_bits(old_size));
if (data) {
arena.free_reusable(data, _capacity / 8);
}
data = new_data;
_data = data;
_capacity = uint32_t(allocated_capacity_in_bits);
}
// Start (of the old size) and end (of the new size) bits
size_t idx = old_size / Support::bit_size_of<BitWord>;
size_t start_bit = old_size % Support::bit_size_of<BitWord>;
size_t end_bit = new_size % Support::bit_size_of<BitWord>;
// Set new bits to either 0 or 1. The `pattern` is used to set multiple
// bits per bit-word and contains either all zeros or all ones.
BitWord pattern = Support::bool_as_mask<BitWord>(new_bits_value);
// First initialize the last bit-word of the old size.
if (start_bit) {
size_t num_bits = 0;
if (idx == (new_size / Support::bit_size_of<BitWord>)) {
// The number of bit-words is the same after the resize. In that case
// we only need to set the necessary bits in the current last bit-word.
ASMJIT_ASSERT(start_bit < end_bit);
num_bits = end_bit - start_bit;
}
else {
// There will be more bit-words after the resize. In that case we don't
// have to be extra careful about the last bit-word of the old size.
num_bits = Support::bit_size_of<BitWord> - start_bit;
}
// Sets `num_bits` bits starting at `start_bit` to the requested pattern.
data[idx++] |= (pattern >> (Support::bit_size_of<BitWord> - num_bits)) << start_bit;
}
// Initialize all bit-words after the last bit-word of the old size.
size_t end_index = _words_per_bits(new_size);
while (idx < end_index) {
data[idx++] = pattern;
}
// Clear unused bits of the last bit-word. Using `&=` keeps the bits of the old size intact in case the
// old and the new size share the last bit-word.
if (end_bit) {
data[end_index - 1] &= (BitWord(1) << end_bit) - 1u;
}
_size = uint32_t(new_size);
return Error::kOk;
}
Error ArenaBitSet::_append(Arena& arena, bool value) noexcept {
constexpr uint32_t kThreshold = Globals::kGrowThreshold * 8u;
uint32_t new_size = _size + 1;
uint32_t ideal_capacity = _capacity;
if (ideal_capacity < 128) {
ideal_capacity = 128;
}
else if (ideal_capacity <= kThreshold) {
ideal_capacity *= 2;
}
else {
ideal_capacity += kThreshold;
}
if (ASMJIT_UNLIKELY(ideal_capacity < _capacity)) {
if (ASMJIT_UNLIKELY(_size == std::numeric_limits<uint32_t>::max())) {
return make_error(Error::kOutOfMemory);
}
ideal_capacity = new_size;
}
return _resize(arena, new_size, ideal_capacity, value);
}
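The growth policy in `_append()` doubles capacity until a threshold and then grows linearly. A standalone model of that policy (the threshold constant below is a stand-in for `Globals::kGrowThreshold * 8`):

```
// Standalone model of the bit-vector growth policy implemented by _append():
// capacity starts at 128 bits, doubles until a threshold, then grows linearly.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t threshold = 1024u * 1024u; // Stand-in for kGrowThreshold * 8.
  uint32_t capacity = 0;

  for (int step = 0; step < 8; step++) {
    uint32_t ideal = capacity;
    if (ideal < 128u) {
      ideal = 128u;
    }
    else if (ideal <= threshold) {
      ideal *= 2u;
    }
    else {
      ideal += threshold;
    }
    capacity = ideal;
    std::printf("grow %d -> %u bits\n", step, capacity);
  }
  return 0;
}
```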
// ArenaBitSet - Tests
// ===================
#if defined(ASMJIT_TEST)
static void test_arena_bitvector(Arena& arena) {
uint32_t i, count;
uint32_t kMaxCount = 100;
ArenaBitSet vec;
EXPECT_TRUE(vec.is_empty());
EXPECT_EQ(vec.size(), 0u);
INFO("ArenaBitSet::resize()");
for (count = 1; count < kMaxCount; count++) {
vec.clear();
EXPECT_EQ(vec.resize(arena, count, false), Error::kOk);
EXPECT_EQ(vec.size(), count);
for (i = 0; i < count; i++) {
EXPECT_FALSE(vec.bit_at(i));
}
vec.clear();
EXPECT_EQ(vec.resize(arena, count, true), Error::kOk);
EXPECT_EQ(vec.size(), count);
for (i = 0; i < count; i++) {
EXPECT_TRUE(vec.bit_at(i));
}
}
INFO("ArenaBitSet::fill_bits() / clear_bits()");
for (count = 1; count < kMaxCount; count += 2) {
vec.clear();
EXPECT_EQ(vec.resize(arena, count), Error::kOk);
EXPECT_EQ(vec.size(), count);
for (i = 0; i < (count + 1) / 2; i++) {
bool value = bool(i & 1);
if (value) {
vec.fill_bits(i, count - i * 2);
}
else {
vec.clear_bits(i, count - i * 2);
}
}
for (i = 0; i < count; i++) {
EXPECT_EQ(vec.bit_at(i), bool(i & 1));
}
}
}
UNIT(arena_bitvector, -1) {
Arena arena(8192);
test_arena_bitvector(arena);
}
#endif
ASMJIT_END_NAMESPACE
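A small usage sketch of `ArenaBitSet` mirroring the unit tests above (the class lives in a private header, so this is internal-only illustration; `resize()` is used as in the tests):

```
// Internal-only sketch: basic ArenaBitSet usage.
#include "../core/arenabitset_p.h"
using namespace asmjit;

static void bitset_example(Arena& arena) {
  ArenaBitSet set;

  if (set.resize(arena, 100, false) != Error::kOk) {
    return; // Out of memory.
  }

  set.set_bit(3, true);
  set.fill_bits(10, 20);     // Sets bits [10..30).

  bool b3 = set.bit_at(3);   // true
  bool b9 = set.bit_at(9);   // false
  (void)b3;
  (void)b9;
}
```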


@@ -0,0 +1,436 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARENABITSET_P_H_INCLUDED
#define ASMJIT_CORE_ARENABITSET_P_H_INCLUDED
#include "../core/arena.h"
#include "../core/span.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_support
//! \{
using BitWord = Support::BitWord;
namespace BitOps {
namespace {
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size_in_bits(const Span<T>& span) noexcept { return span.size() * Support::bit_size_of<T>; }
template<typename T>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size_in_words(size_t num_bits) noexcept { return (num_bits + Support::bit_size_of<T> - 1u) / Support::bit_size_of<T>; }
template<typename T, typename Index>
[[nodiscard]]
ASMJIT_INLINE bool bit_at(const Span<T>& span, const Index& index) noexcept {
size_t i = Support::as_basic_uint(index);
size_t word_index = i / Support::bit_size_of<T>;
size_t bit_index = i % Support::bit_size_of<T>;
return bool((span[word_index] >> bit_index) & 0x1u);
}
template<typename T, typename Index>
ASMJIT_INLINE void set_bit(const Span<T>& span, const Index& index, bool value) noexcept {
size_t i = Support::as_basic_uint(index);
size_t word_index = i / Support::bit_size_of<T>;
size_t bit_index = i % Support::bit_size_of<T>;
T and_mask = T(~(T(1u) << bit_index));
T bit_mask = T(T(value) << bit_index);
T& bit_word = span[word_index];
bit_word = T((bit_word & and_mask) | bit_mask);
}
template<typename T, typename Index>
ASMJIT_INLINE void clear_bit(const Span<T>& span, const Index& index) noexcept {
size_t i = Support::as_basic_uint(index);
size_t word_index = i / Support::bit_size_of<T>;
size_t bit_index = i % Support::bit_size_of<T>;
T and_mask = T(~(T(1u) << bit_index));
T& bit_word = span[word_index];
bit_word = T(bit_word & and_mask);
}
template<typename T, typename Index>
ASMJIT_INLINE void or_bit(const Span<T>& span, const Index& index, bool value) noexcept {
size_t i = Support::as_basic_uint(index);
size_t word_index = i / Support::bit_size_of<T>;
size_t bit_index = i % Support::bit_size_of<T>;
T bit_mask = T(T(value) << bit_index);
T& bit_word = span[word_index];
bit_word = T(bit_word | bit_mask);
}
template<typename T, typename Index>
ASMJIT_INLINE void xor_bit(const Span<T>& span, const Index& index, bool value) noexcept {
size_t i = Support::as_basic_uint(index);
size_t word_index = i / Support::bit_size_of<T>;
size_t bit_index = i % Support::bit_size_of<T>;
T bit_mask = T(T(value) << bit_index);
T& bit_word = span[word_index];
bit_word = T(bit_word ^ bit_mask);
}
template<typename Op, typename T, typename... Args>
ASMJIT_INLINE void combine_spans(Span<T> dst, Args&&... args) noexcept {
size_t size = dst.size();
for (size_t i = 0u; i < size; i++) {
dst[i] = Op::op(args[i]...);
}
}
template<typename T, typename... Args>
ASMJIT_INLINE void or_(Span<T> dst, Args&&... args) noexcept {
return combine_spans<Support::Or>(dst, std::forward<Args>(args)...);
}
} // {anonymous}
} // {BitOps}
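A quick sketch of the `BitOps` span helpers above: the word is selected by division, the bit by modulo, exactly as the functions compute it (internal header, illustration only):

```
// Internal-only sketch of the BitOps helpers over a Span<BitWord>.
#include "../core/arenabitset_p.h"
using namespace asmjit;

static void bitops_example() {
  BitWord words[2] = { 0, 0 };
  Span<BitWord> span(words, 2);

  BitOps::set_bit(span, 5, true);
  BitOps::or_bit(span, 40, true);     // Lands in words[40 / bit_size_of<BitWord>].

  bool b5 = BitOps::bit_at(span, 5);  // true
  bool b6 = BitOps::bit_at(span, 6);  // false
  (void)b5;
  (void)b6;
}
```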
//! Arena-allocated bit vector.
class ArenaBitSet {
public:
ASMJIT_NONCOPYABLE(ArenaBitSet)
//! \name Members
//! \{
//! Bits.
BitWord* _data {};
//! Size of the bit-vector (in bits).
uint32_t _size {};
//! Capacity of the bit-vector (in bits).
uint32_t _capacity {};
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
static ASMJIT_INLINE_NODEBUG size_t _words_per_bits(size_t num_bits) noexcept {
return ((num_bits + Support::bit_size_of<BitWord> - 1u) / Support::bit_size_of<BitWord>);
}
static ASMJIT_INLINE_NODEBUG void _zero_bits(BitWord* dst, size_t bit_word_count) noexcept {
for (size_t i = 0; i < bit_word_count; i++) {
dst[i] = 0;
}
}
static ASMJIT_INLINE_NODEBUG void _fill_bits(BitWord* dst, size_t bit_word_count) noexcept {
for (size_t i = 0; i < bit_word_count; i++) {
dst[i] = ~BitWord(0);
}
}
static ASMJIT_INLINE_NODEBUG void _copy_bits(BitWord* dst, const BitWord* src, size_t bit_word_count) noexcept {
for (size_t i = 0; i < bit_word_count; i++) {
dst[i] = src[i];
}
}
//! \}
//! \endcond
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ArenaBitSet() noexcept {}
ASMJIT_INLINE_NODEBUG ArenaBitSet(ArenaBitSet&& other) noexcept
: _data(other._data),
_size(other._size),
_capacity(other._capacity) {}
//! \}
//! \name Overloaded Operators
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator==(const ArenaBitSet& other) const noexcept { return equals(other); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool operator!=(const ArenaBitSet& other) const noexcept { return !equals(other); }
//! \}
//! \name Accessors
//! \{
//! Tests whether the bit-vector is empty (has no bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _size == 0; }
//! Returns the size of this bit-vector (in bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns the capacity of this bit-vector (in bits).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; }
//! Returns the size of the `BitWord[]` array in `BitWord` units.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size_in_bit_words() const noexcept { return _words_per_bits(_size); }
//! Returns the capacity of the `BitWord[]` array in `BitWord` units.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity_in_bit_words() const noexcept { return _words_per_bits(_capacity); }
//! Returns bit-vector data as `BitWord[]`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BitWord* data() noexcept { return _data; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const BitWord* data() const noexcept { return _data; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Span<BitWord> as_span() noexcept { return Span<BitWord>(_data, size_in_bit_words()); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Span<const BitWord> as_span() const noexcept { return Span<const BitWord>(_data, size_in_bit_words()); }
//! \}
//! \name Utilities
//! \{
ASMJIT_INLINE_NODEBUG void swap(ArenaBitSet& other) noexcept {
std::swap(_data, other._data);
std::swap(_size, other._size);
std::swap(_capacity, other._capacity);
}
ASMJIT_INLINE_NODEBUG void clear() noexcept {
_size = 0;
}
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_data = nullptr;
_size = 0u;
_capacity = 0u;
}
ASMJIT_INLINE_NODEBUG void truncate(uint32_t new_size) noexcept {
_size = Support::min(_size, new_size);
_clear_unused_bits();
}
template<typename Index>
[[nodiscard]]
inline bool bit_at(const Index& index) const noexcept {
ASMJIT_ASSERT(Support::as_basic_uint(index) < _size);
return Support::bit_vector_get_bit(_data, Support::as_basic_uint(index));
}
template<typename Index>
inline void set_bit(const Index& index, bool value) noexcept {
ASMJIT_ASSERT(Support::as_basic_uint(index) < _size);
Support::bit_vector_set_bit(_data, Support::as_basic_uint(index), value);
}
template<typename Index>
inline void add_bit(const Index& index, bool value) noexcept {
ASMJIT_ASSERT(Support::as_basic_uint(index) < _size);
Support::bit_vector_or_bit(_data, Support::as_basic_uint(index), value);
}
template<typename Index>
inline void clear_bit(const Index& index) noexcept {
ASMJIT_ASSERT(Support::as_basic_uint(index) < _size);
Support::bit_vector_set_bit(_data, Support::as_basic_uint(index), false);
}
template<typename Index>
inline void xor_bit(const Index& index, bool value) noexcept {
ASMJIT_ASSERT(Support::as_basic_uint(index) < _size);
Support::bit_vector_xor_bit(_data, Support::as_basic_uint(index), value);
}
ASMJIT_INLINE Error append(Arena& arena, bool value) noexcept {
uint32_t index = _size;
if (ASMJIT_UNLIKELY(index >= _capacity))
return _append(arena, value);
uint32_t idx = index / Support::bit_size_of<BitWord>;
uint32_t bit = index % Support::bit_size_of<BitWord>;
if (bit == 0)
_data[idx] = BitWord(value) << bit;
else
_data[idx] |= BitWord(value) << bit;
_size++;
return Error::kOk;
}
Error copy_from(Arena& arena, const ArenaBitSet& other) noexcept;
ASMJIT_INLINE void clear_all() noexcept {
_zero_bits(_data, _words_per_bits(_size));
}
ASMJIT_INLINE void fill_all() noexcept {
_fill_bits(_data, _words_per_bits(_size));
_clear_unused_bits();
}
ASMJIT_INLINE void clear_bits(size_t start, size_t count) noexcept {
ASMJIT_ASSERT(start <= size_t(_size));
ASMJIT_ASSERT(size_t(_size) - start >= count);
Support::bit_vector_clear(_data, start, count);
}
ASMJIT_INLINE void fill_bits(size_t start, size_t count) noexcept {
ASMJIT_ASSERT(start <= size_t(_size));
ASMJIT_ASSERT(size_t(_size) - start >= count);
Support::bit_vector_fill(_data, start, count);
}
//! Performs a logical bitwise AND between bits specified in this array and bits in `other`. If `other` has
//! fewer bits than `this` then all remaining bits are set to zero.
//!
//! \note The size of the BitVector is unaffected by this operation.
ASMJIT_INLINE void and_(const ArenaBitSet& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
size_t this_bit_word_count = size_in_bit_words();
size_t other_bit_word_count = other.size_in_bit_words();
size_t common_bit_word_count = Support::min(this_bit_word_count, other_bit_word_count);
size_t i = 0;
while (i < common_bit_word_count) {
dst[i] = dst[i] & src[i];
i++;
}
while (i < this_bit_word_count) {
dst[i] = 0;
i++;
}
}
//! Performs a logical bitwise AND between bits specified in this array and negated bits in `other`. If `other`
//! has fewer bits than `this` then all remaining bits are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
ASMJIT_INLINE void and_not(const ArenaBitSet& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
size_t common_bit_word_count = _words_per_bits(Support::min(_size, other._size));
for (size_t i = 0; i < common_bit_word_count; i++) {
dst[i] = dst[i] & ~src[i];
}
}
//! Performs a logical bitwise OR between bits specified in this array and bits in `other`. If `other` has
//! fewer bits than `this` then all remaining bits are kept intact.
//!
//! \note The size of the BitVector is unaffected by this operation.
ASMJIT_INLINE void or_(const ArenaBitSet& other) noexcept {
BitWord* dst = _data;
const BitWord* src = other._data;
size_t common_bit_word_count = _words_per_bits(Support::min(_size, other._size));
for (size_t i = 0; i < common_bit_word_count; i++) {
dst[i] = dst[i] | src[i];
}
_clear_unused_bits();
}
ASMJIT_INLINE void _clear_unused_bits() noexcept {
uint32_t idx = _size / Support::bit_size_of<BitWord>;
uint32_t bit = _size % Support::bit_size_of<BitWord>;
if (!bit) {
return;
}
_data[idx] &= (BitWord(1) << bit) - 1u;
}
[[nodiscard]]
ASMJIT_INLINE bool equals(const ArenaBitSet& other) const noexcept {
if (_size != other._size) {
return false;
}
const BitWord* a_data = _data;
const BitWord* b_data = other._data;
size_t bit_word_count = _words_per_bits(_size);
for (size_t i = 0; i < bit_word_count; i++) {
if (a_data[i] != b_data[i]) {
return false;
}
}
return true;
}
//! \}
//! \name Memory Management
//! \{
inline void release(Arena& arena) noexcept {
if (!_data) {
return;
}
arena.free_reusable(_data, _capacity / 8u);
reset();
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Error resize(Arena& arena, size_t new_size, bool new_bits_value = false) noexcept {
return _resize(arena, new_size, new_size, new_bits_value);
}
Error _resize(Arena& arena, size_t new_size, size_t ideal_capacity, bool new_bits_value) noexcept;
Error _append(Arena& arena, bool value) noexcept;
//! \}
//! \name Iterators
//! \{
class ForEachBitSet : public Support::BitVectorIterator<BitWord> {
public:
inline explicit ForEachBitSet(const ArenaBitSet& bit_vector) noexcept
: Support::BitVectorIterator<BitWord>(bit_vector.as_span()) {}
};
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARENABITSET_P_H_INCLUDED

src/asmjit/core/arenahash.cpp

@@ -5,12 +5,12 @@
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonehash.h"
#include "../core/arena.h"
#include "../core/arenahash.h"
ASMJIT_BEGIN_NAMESPACE
// ZoneHashBase - Prime Numbers
// ArenaHashBase - Prime Numbers
// ============================
#define ASMJIT_POPULATE_PRIMES(ENTRY) \
@@ -152,108 +152,108 @@ struct HashPrime {
uint32_t rcp;
};
static const HashPrime ZoneHash_primeArray[] = {
static const HashPrime ArenaHash_prime_array[] = {
#define E(PRIME, RCP, SHIFT) { PRIME, RCP }
ASMJIT_POPULATE_PRIMES(E)
#undef E
};
static const uint8_t ZoneHash_primeShift[] = {
static const uint8_t ArenaHash_prime_shift[] = {
#define E(PRIME, RCP, SHIFT) uint8_t(SHIFT)
ASMJIT_POPULATE_PRIMES(E)
#undef E
};
// ZoneHashBase - Rehash
// ArenaHashBase - Rehash
// =====================
void ZoneHashBase::_rehash(ZoneAllocator* allocator, uint32_t primeIndex) noexcept {
ASMJIT_ASSERT(primeIndex < ASMJIT_ARRAY_SIZE(ZoneHash_primeArray));
uint32_t newCount = ZoneHash_primeArray[primeIndex].prime;
void ArenaHashBase::_rehash(Arena& arena, uint32_t prime_index) noexcept {
ASMJIT_ASSERT(prime_index < ASMJIT_ARRAY_SIZE(ArenaHash_prime_array));
uint32_t new_count = ArenaHash_prime_array[prime_index].prime;
ZoneHashNode** oldData = _data;
ZoneHashNode** newData = reinterpret_cast<ZoneHashNode**>(allocator->allocZeroed(size_t(newCount) * sizeof(ZoneHashNode*)));
ArenaHashNode** old_data = _data;
ArenaHashNode** new_data = reinterpret_cast<ArenaHashNode**>(arena.alloc_reusable_zeroed(size_t(new_count) * sizeof(ArenaHashNode*)));
// We can still store nodes into the table, but it will degrade.
if (ASMJIT_UNLIKELY(newData == nullptr)) {
if (ASMJIT_UNLIKELY(new_data == nullptr)) {
return;
}
uint32_t i;
uint32_t oldCount = _bucketsCount;
uint32_t old_count = _buckets_count;
_data = newData;
_bucketsCount = newCount;
_bucketsGrow = uint32_t(newCount * 0.9);
_rcpValue = ZoneHash_primeArray[primeIndex].rcp;
_rcpShift = ZoneHash_primeShift[primeIndex];
_primeIndex = uint8_t(primeIndex);
_data = new_data;
_buckets_count = new_count;
_buckets_grow = uint32_t(new_count * 0.9);
_rcp_value = ArenaHash_prime_array[prime_index].rcp;
_rcp_shift = ArenaHash_prime_shift[prime_index];
_prime_index = uint8_t(prime_index);
for (i = 0; i < oldCount; i++) {
ZoneHashNode* node = oldData[i];
for (i = 0; i < old_count; i++) {
ArenaHashNode* node = old_data[i];
while (node) {
ZoneHashNode* next = node->_hashNext;
uint32_t hashMod = _calcMod(node->_hashCode);
ArenaHashNode* next = node->_hash_next;
uint32_t hash_mod = _calc_mod(node->_hash_code);
node->_hashNext = newData[hashMod];
newData[hashMod] = node;
node->_hash_next = new_data[hash_mod];
new_data[hash_mod] = node;
node = next;
}
}
if (oldData != _embedded) {
allocator->release(oldData, oldCount * sizeof(ZoneHashNode*));
if (old_data != _embedded) {
arena.free_reusable(old_data, old_count * sizeof(ArenaHashNode*));
}
}
// ZoneHashBase - Operations
// ArenaHashBase - Operations
// =========================
ZoneHashNode* ZoneHashBase::_insert(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
uint32_t hashMod = _calcMod(node->_hashCode);
ZoneHashNode* next = _data[hashMod];
ArenaHashNode* ArenaHashBase::_insert(Arena& arena, ArenaHashNode* node) noexcept {
uint32_t hash_mod = _calc_mod(node->_hash_code);
ArenaHashNode* next = _data[hash_mod];
node->_hashNext = next;
_data[hashMod] = node;
node->_hash_next = next;
_data[hash_mod] = node;
if (++_size > _bucketsGrow) {
uint32_t primeIndex = Support::min<uint32_t>(_primeIndex + 2, ASMJIT_ARRAY_SIZE(ZoneHash_primeArray) - 1);
if (primeIndex > _primeIndex) {
_rehash(allocator, primeIndex);
if (++_size > _buckets_grow) {
uint32_t prime_index = Support::min<uint32_t>(_prime_index + 2, ASMJIT_ARRAY_SIZE(ArenaHash_prime_array) - 1);
if (prime_index > _prime_index) {
_rehash(arena, prime_index);
}
}
return node;
}
ZoneHashNode* ZoneHashBase::_remove(ZoneAllocator* allocator, ZoneHashNode* node) noexcept {
DebugUtils::unused(allocator);
uint32_t hashMod = _calcMod(node->_hashCode);
ArenaHashNode* ArenaHashBase::_remove(Arena& arena, ArenaHashNode* node) noexcept {
Support::maybe_unused(arena);
uint32_t hash_mod = _calc_mod(node->_hash_code);
ZoneHashNode** pPrev = &_data[hashMod];
ZoneHashNode* p = *pPrev;
ArenaHashNode** prev_ptr = &_data[hash_mod];
ArenaHashNode* p = *prev_ptr;
while (p) {
if (p == node) {
*pPrev = p->_hashNext;
*prev_ptr = p->_hash_next;
_size--;
return node;
}
pPrev = &p->_hashNext;
p = *pPrev;
prev_ptr = &p->_hash_next;
p = *prev_ptr;
}
return nullptr;
}
// ZoneHashBase - Tests
// ArenaHashBase - Tests
// ====================
#if defined(ASMJIT_TEST)
struct MyHashNode : public ZoneHashNode {
struct MyHashNode : public ArenaHashNode {
inline MyHashNode(uint32_t key) noexcept
: ZoneHashNode(key),
: ArenaHashNode(key),
_key(key) {}
uint32_t _key;
@@ -263,24 +263,22 @@ struct MyKeyMatcher {
inline MyKeyMatcher(uint32_t key) noexcept
: _key(key) {}
inline uint32_t hashCode() const noexcept { return _key; }
inline uint32_t hash_code() const noexcept { return _key; }
inline bool matches(const MyHashNode* node) const noexcept { return node->_key == _key; }
uint32_t _key;
};
UNIT(zone_hash) {
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
UNIT(arena_hash) {
uint32_t kCount = BrokenAPI::has_arg("--quick") ? 1000 : 10000;
Zone zone(4096);
ZoneAllocator allocator(&zone);
ZoneHash<MyHashNode> hashTable;
Arena arena(4096);
ArenaHash<MyHashNode> hash_table;
uint32_t key;
INFO("Inserting %u elements to HashTable", unsigned(kCount));
for (key = 0; key < kCount; key++) {
hashTable.insert(&allocator, zone.newT<MyHashNode>(key));
hash_table.insert(arena, arena.new_oneshot<MyHashNode>(key));
}
uint32_t count = kCount;
@@ -289,22 +287,22 @@ UNIT(zone_hash) {
MyHashNode* node;
for (key = 0; key < count; key++) {
node = hashTable.get(MyKeyMatcher(key));
node = hash_table.get(MyKeyMatcher(key));
EXPECT_NOT_NULL(node);
EXPECT_EQ(node->_key, key);
}
{
count--;
node = hashTable.get(MyKeyMatcher(count));
hashTable.remove(&allocator, node);
node = hash_table.get(MyKeyMatcher(count));
hash_table.remove(arena, node);
node = hashTable.get(MyKeyMatcher(count));
node = hash_table.get(MyKeyMatcher(count));
EXPECT_NULL(node);
}
} while (count);
EXPECT_TRUE(hashTable.empty());
EXPECT_TRUE(hash_table.is_empty());
}
#endif

src/asmjit/core/arenahash.h

@@ -0,0 +1,198 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARENAHASH_H_INCLUDED
#define ASMJIT_CORE_ARENAHASH_H_INCLUDED
#include "../core/arena.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_support
//! \{
//! Node used by \ref ArenaHash template.
//!
//! To make `ArenaHash::get()` work, the key passed to it must provide `uint32_t hash_code() const` and `bool matches(const NodeT*) const`.
class ArenaHashNode {
public:
ASMJIT_NONCOPYABLE(ArenaHashNode)
ASMJIT_INLINE_NODEBUG explicit ArenaHashNode(uint32_t hash_code = 0u, uint32_t custom_data = 0u) noexcept
: _hash_code(hash_code),
_custom_data(custom_data) {}
//! Next node in the chain, null if it terminates the chain.
ArenaHashNode* _hash_next {};
//! Precalculated hash-code of key.
uint32_t _hash_code {};
//! Padding, can be reused by any Node that inherits `ArenaHashNode`.
uint32_t _custom_data {};
};
//! Base class used by \ref ArenaHash template
class ArenaHashBase {
public:
ASMJIT_NONCOPYABLE(ArenaHashBase)
//! Buckets data.
ArenaHashNode** _data;
//! Count of records inserted into the hash table.
size_t _size;
//! Count of hash buckets.
uint32_t _buckets_count;
//! When buckets array should grow (only checked after insertion).
uint32_t _buckets_grow;
//! Reciprocal value of `_buckets_count`.
uint32_t _rcp_value;
//! How many bits to shift right when hash is multiplied with `_rcp_value`.
uint8_t _rcp_shift;
//! Prime value index in internal prime array.
uint8_t _prime_index;
//! Embedded data, used by empty hash tables.
ArenaHashNode* _embedded[1];
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ArenaHashBase() noexcept {
reset();
}
inline ArenaHashBase(ArenaHashBase&& other) noexcept {
_data = other._data;
_size = other._size;
_buckets_count = other._buckets_count;
_buckets_grow = other._buckets_grow;
_rcp_value = other._rcp_value;
_rcp_shift = other._rcp_shift;
_prime_index = other._prime_index;
_embedded[0] = other._embedded[0];
if (_data == other._embedded) {
_data = _embedded;
}
}
inline void reset() noexcept {
_data = _embedded;
_size = 0;
_buckets_count = 1;
_buckets_grow = 1;
_rcp_value = 1;
_rcp_shift = 0;
_prime_index = 0;
_embedded[0] = nullptr;
}
inline void release(Arena& arena) noexcept {
ArenaHashNode** old_data = _data;
if (old_data != _embedded) {
arena.free_reusable(old_data, _buckets_count * sizeof(ArenaHashNode*));
}
reset();
}
//! \}
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _size == 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! \}
//! \name Utilities
//! \{
inline void _swap(ArenaHashBase& other) noexcept {
std::swap(_data, other._data);
std::swap(_size, other._size);
std::swap(_buckets_count, other._buckets_count);
std::swap(_buckets_grow, other._buckets_grow);
std::swap(_rcp_value, other._rcp_value);
std::swap(_rcp_shift, other._rcp_shift);
std::swap(_prime_index, other._prime_index);
std::swap(_embedded[0], other._embedded[0]);
if (_data == other._embedded) {
_data = _embedded;
}
if (other._data == _embedded) {
other._data = other._embedded;
}
}
//! \cond INTERNAL
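//! Computes `hash % _buckets_count` without a division: `_rcp_value` is a precomputed reciprocal of
//! `_buckets_count`, so `(hash * _rcp_value) >> _rcp_shift` approximates `hash / _buckets_count`, and the
//! remainder is then recovered with a single multiply and subtract.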
inline uint32_t _calc_mod(uint32_t hash) const noexcept {
uint32_t x = uint32_t((uint64_t(hash) * _rcp_value) >> _rcp_shift);
return hash - x * _buckets_count;
}
ASMJIT_API void _rehash(Arena& arena, uint32_t prime_index) noexcept;
ASMJIT_API ArenaHashNode* _insert(Arena& arena, ArenaHashNode* node) noexcept;
ASMJIT_API ArenaHashNode* _remove(Arena& arena, ArenaHashNode* node) noexcept;
//! \endcond
//! \}
};
//! Low-level hash table specialized for storing string keys and POD values.
//!
//! This hash table allows duplicates to be inserted; the API is so low-level that preventing duplicates is up
//! to the user - the typical pattern is to `get()` the node first and to only `insert()` a new node when no
//! matching node exists.
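//!
//! A usage sketch modeled on the unit test in arenahash.cpp (`MyHashNode` and `MyKeyMatcher` are
//! illustrative types, not part of the API):
//!
//! \code
//! Arena arena(4096);
//! ArenaHash<MyHashNode> table;
//! uint32_t key = 42;
//!
//! // Insert a node (the node carries its own precalculated hash code).
//! table.insert(arena, arena.new_oneshot<MyHashNode>(key));
//!
//! // Look it up via a matcher that provides hash_code() and matches().
//! MyHashNode* node = table.get(MyKeyMatcher(key));
//! \endcode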
template<typename NodeT>
class ArenaHash : public ArenaHashBase {
public:
ASMJIT_NONCOPYABLE(ArenaHash)
using Node = NodeT;
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ArenaHash() noexcept
: ArenaHashBase() {}
ASMJIT_INLINE_NODEBUG ArenaHash(ArenaHash&& other) noexcept
: ArenaHashBase(std::move(other)) {}
//! \}
//! \name Utilities
//! \{
ASMJIT_INLINE_NODEBUG void swap(ArenaHash& other) noexcept { ArenaHashBase::_swap(other); }
template<typename KeyT>
[[nodiscard]]
inline NodeT* get(const KeyT& key) const noexcept {
uint32_t hash_mod = _calc_mod(key.hash_code());
NodeT* node = static_cast<NodeT*>(_data[hash_mod]);
while (node && !key.matches(node)) {
node = static_cast<NodeT*>(node->_hash_next);
}
return node;
}
ASMJIT_INLINE_NODEBUG NodeT* insert(Arena& arena, NodeT* node) noexcept { return static_cast<NodeT*>(_insert(arena, node)); }
ASMJIT_INLINE_NODEBUG NodeT* remove(Arena& arena, NodeT* node) noexcept { return static_cast<NodeT*>(_remove(arena, node)); }
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARENAHASH_H_INCLUDED

src/asmjit/core/arenalist.cpp

@@ -4,34 +4,34 @@
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/zone.h"
#include "../core/zonelist.h"
#include "../core/arena.h"
#include "../core/arenalist.h"
ASMJIT_BEGIN_NAMESPACE
// ZoneList - Tests
// ArenaList - Tests
// ================
#if defined(ASMJIT_TEST)
class MyListNode : public ZoneListNode<MyListNode> {};
class MyListNode : public ArenaListNode<MyListNode> {};
UNIT(zone_list) {
Zone zone(4096);
ZoneList<MyListNode> list;
UNIT(arena_list) {
Arena arena(4096);
ArenaList<MyListNode> list;
MyListNode* a = zone.newT<MyListNode>();
MyListNode* b = zone.newT<MyListNode>();
MyListNode* c = zone.newT<MyListNode>();
MyListNode* d = zone.newT<MyListNode>();
MyListNode* a = arena.new_oneshot<MyListNode>();
MyListNode* b = arena.new_oneshot<MyListNode>();
MyListNode* c = arena.new_oneshot<MyListNode>();
MyListNode* d = arena.new_oneshot<MyListNode>();
INFO("Append / Unlink");
// []
EXPECT_TRUE(list.empty());
EXPECT_TRUE(list.is_empty());
// [A]
list.append(a);
EXPECT_FALSE(list.empty());
EXPECT_FALSE(list.is_empty());
EXPECT_EQ(list.first(), a);
EXPECT_EQ(list.last(), a);
EXPECT_NULL(a->prev());
@@ -79,7 +79,7 @@ UNIT(zone_list) {
// []
list.unlink(b);
EXPECT_TRUE(list.empty());
EXPECT_TRUE(list.is_empty());
EXPECT_NULL(list.first());
EXPECT_NULL(list.last());
EXPECT_NULL(b->prev());
@@ -89,7 +89,7 @@ UNIT(zone_list) {
// [A]
list.prepend(a);
EXPECT_FALSE(list.empty());
EXPECT_FALSE(list.is_empty());
EXPECT_EQ(list.first(), a);
EXPECT_EQ(list.last(), a);
EXPECT_NULL(a->prev());
@@ -107,7 +107,7 @@ UNIT(zone_list) {
INFO("InsertAfter / InsertBefore");
// [B, A, C]
list.insertAfter(a, c);
list.insert_after(a, c);
EXPECT_EQ(list.first(), b);
EXPECT_EQ(list.last(), c);
EXPECT_NULL(b->prev());
@@ -118,7 +118,7 @@ UNIT(zone_list) {
EXPECT_NULL(c->next());
// [B, D, A, C]
list.insertBefore(a, d);
list.insert_before(a, d);
EXPECT_EQ(list.first(), b);
EXPECT_EQ(list.last(), c);
EXPECT_NULL(b->prev());
@@ -133,7 +133,7 @@ UNIT(zone_list) {
INFO("PopFirst / Pop");
// [D, A, C]
EXPECT_EQ(list.popFirst(), b);
EXPECT_EQ(list.pop_first(), b);
EXPECT_NULL(b->prev());
EXPECT_NULL(b->next());

src/asmjit/core/arenalist.h

@@ -3,21 +3,21 @@
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONELIST_H_INCLUDED
#define ASMJIT_CORE_ZONELIST_H_INCLUDED
#ifndef ASMJIT_CORE_ARENALIST_H_INCLUDED
#define ASMJIT_CORE_ARENALIST_H_INCLUDED
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \addtogroup asmjit_support
//! \{
//! Node used by \ref ZoneList template.
//! Node used by \ref ArenaList template.
template<typename NodeT>
class ZoneListNode {
class ArenaListNode {
public:
ASMJIT_NONCOPYABLE(ZoneListNode)
ASMJIT_NONCOPYABLE(ArenaListNode)
//! \name Constants
//! \{
@@ -30,18 +30,18 @@ public:
//! \name Members
//! \{
NodeT* _listNodes[2];
NodeT* _list_nodes[2];
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ZoneListNode() noexcept
: _listNodes{nullptr, nullptr} {}
ASMJIT_INLINE_NODEBUG ArenaListNode() noexcept
: _list_nodes{nullptr, nullptr} {}
ASMJIT_INLINE_NODEBUG ZoneListNode(ZoneListNode&& other) noexcept
: _listNodes{other._listNodes[0], other._listNodes[1]} {}
ASMJIT_INLINE_NODEBUG ArenaListNode(ArenaListNode&& other) noexcept
: _list_nodes{other._list_nodes[0], other._list_nodes[1]} {}
//! \}
@@ -49,25 +49,25 @@ public:
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPrev() const noexcept { return _listNodes[kNodeIndexPrev] != nullptr; }
ASMJIT_INLINE_NODEBUG bool has_prev() const noexcept { return _list_nodes[kNodeIndexPrev] != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasNext() const noexcept { return _listNodes[kNodeIndexNext] != nullptr; }
ASMJIT_INLINE_NODEBUG bool has_next() const noexcept { return _list_nodes[kNodeIndexNext] != nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* prev() const noexcept { return _listNodes[kNodeIndexPrev]; }
ASMJIT_INLINE_NODEBUG NodeT* prev() const noexcept { return _list_nodes[kNodeIndexPrev]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* next() const noexcept { return _listNodes[kNodeIndexNext]; }
ASMJIT_INLINE_NODEBUG NodeT* next() const noexcept { return _list_nodes[kNodeIndexNext]; }
//! \}
};
//! Zone allocated list container that uses nodes of `NodeT` type.
//! Arena-allocated list container that uses nodes of `NodeT` type.
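//!
//! A minimal usage sketch (the node type and arena setup are illustrative):
//!
//! \code
//! class MyNode : public ArenaListNode<MyNode> {};
//!
//! Arena arena(4096);
//! ArenaList<MyNode> list;
//!
//! list.append(arena.new_oneshot<MyNode>());
//! MyNode* first = list.pop_first(); // The list is empty again.
//! \endcode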
template <typename NodeT>
class ZoneList {
class ArenaList {
public:
ASMJIT_NONCOPYABLE(ZoneList)
ASMJIT_NONCOPYABLE(ArenaList)
//! \name Constants
//! \{
@@ -87,9 +87,9 @@ public:
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ZoneList() noexcept {}
ASMJIT_INLINE_NODEBUG ArenaList() noexcept {}
ASMJIT_INLINE_NODEBUG ZoneList(ZoneList&& other) noexcept
ASMJIT_INLINE_NODEBUG ArenaList(ArenaList&& other) noexcept
: _nodes { other._nodes[0], other._nodes[1] } {}
ASMJIT_INLINE_NODEBUG void reset() noexcept {
@@ -103,7 +103,7 @@ public:
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _nodes[0] == nullptr; }
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _nodes[0] == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* first() const noexcept { return _nodes[kNodeIndexFirst]; }
@@ -116,19 +116,19 @@ public:
//! \name Utilities
//! \{
ASMJIT_INLINE_NODEBUG void swap(ZoneList& other) noexcept {
ASMJIT_INLINE_NODEBUG void swap(ArenaList& other) noexcept {
std::swap(_nodes[0], other._nodes[0]);
std::swap(_nodes[1], other._nodes[1]);
}
// Can be used to both append and prepend.
inline void _addNode(NodeT* node, size_t dir) noexcept {
inline void _add_node(NodeT* node, size_t dir) noexcept {
NodeT* prev = _nodes[dir];
node->_listNodes[!dir] = prev;
node->_list_nodes[!dir] = prev;
_nodes[dir] = node;
if (prev) {
prev->_listNodes[dir] = node;
prev->_list_nodes[dir] = node;
}
else {
_nodes[!dir] = node;
@@ -136,45 +136,45 @@ public:
}
// Can be used to both append and prepend.
inline void _insertNode(NodeT* ref, NodeT* node, size_t dir) noexcept {
inline void _insert_node(NodeT* ref, NodeT* node, size_t dir) noexcept {
ASMJIT_ASSERT(ref != nullptr);
NodeT* prev = ref;
NodeT* next = ref->_listNodes[dir];
NodeT* next = ref->_list_nodes[dir];
prev->_listNodes[dir] = node;
prev->_list_nodes[dir] = node;
if (next) {
next->_listNodes[!dir] = node;
next->_list_nodes[!dir] = node;
}
else {
_nodes[dir] = node;
}
node->_listNodes[!dir] = prev;
node->_listNodes[ dir] = next;
node->_list_nodes[!dir] = prev;
node->_list_nodes[ dir] = next;
}
ASMJIT_INLINE_NODEBUG void append(NodeT* node) noexcept { _addNode(node, kNodeIndexLast); }
ASMJIT_INLINE_NODEBUG void prepend(NodeT* node) noexcept { _addNode(node, kNodeIndexFirst); }
ASMJIT_INLINE_NODEBUG void append(NodeT* node) noexcept { _add_node(node, kNodeIndexLast); }
ASMJIT_INLINE_NODEBUG void prepend(NodeT* node) noexcept { _add_node(node, kNodeIndexFirst); }
ASMJIT_INLINE_NODEBUG void insertAfter(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, NodeT::kNodeIndexNext); }
ASMJIT_INLINE_NODEBUG void insertBefore(NodeT* ref, NodeT* node) noexcept { _insertNode(ref, node, NodeT::kNodeIndexPrev); }
ASMJIT_INLINE_NODEBUG void insert_after(NodeT* ref, NodeT* node) noexcept { _insert_node(ref, node, NodeT::kNodeIndexNext); }
ASMJIT_INLINE_NODEBUG void insert_before(NodeT* ref, NodeT* node) noexcept { _insert_node(ref, node, NodeT::kNodeIndexPrev); }
inline NodeT* unlink(NodeT* node) noexcept {
NodeT* prev = node->prev();
NodeT* next = node->next();
if (prev) { prev->_listNodes[1] = next; } else { _nodes[0] = next; }
if (next) { next->_listNodes[0] = prev; } else { _nodes[1] = prev; }
if (prev) { prev->_list_nodes[1] = next; } else { _nodes[0] = next; }
if (next) { next->_list_nodes[0] = prev; } else { _nodes[1] = prev; }
node->_listNodes[0] = nullptr;
node->_listNodes[1] = nullptr;
node->_list_nodes[0] = nullptr;
node->_list_nodes[1] = nullptr;
return node;
}
[[nodiscard]]
inline NodeT* popFirst() noexcept {
inline NodeT* pop_first() noexcept {
NodeT* node = _nodes[0];
ASMJIT_ASSERT(node != nullptr);
@@ -182,8 +182,8 @@ public:
_nodes[0] = next;
if (next) {
next->_listNodes[0] = nullptr;
node->_listNodes[1] = nullptr;
next->_list_nodes[0] = nullptr;
node->_list_nodes[1] = nullptr;
}
else {
_nodes[1] = nullptr;
@@ -201,8 +201,8 @@ public:
_nodes[1] = prev;
if (prev) {
prev->_listNodes[1] = nullptr;
node->_listNodes[0] = nullptr;
prev->_list_nodes[1] = nullptr;
node->_list_nodes[0] = nullptr;
}
else {
_nodes[0] = nullptr;
@@ -218,4 +218,4 @@ public:
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONELIST_H_INCLUDED
#endif // ASMJIT_CORE_ARENALIST_H_INCLUDED

src/asmjit/core/arenapool.h

@@ -0,0 +1,66 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARENAPOOL_H_INCLUDED
#define ASMJIT_CORE_ARENAPOOL_H_INCLUDED
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_support
//! \{
//! Helper class for implementing pooling of arena-allocated objects.
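//!
//! A minimal usage sketch (`MyItem` is an illustrative POD type):
//!
//! \code
//! Arena arena(4096);
//! ArenaPool<MyItem> pool;
//!
//! MyItem* item = pool.alloc(arena); // Fresh or recycled storage (null on OOM).
//! pool.release(item);               // Goes back to the pool, not to the arena.
//! \endcode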
template<typename T, size_t Size = sizeof(T)>
class ArenaPool {
public:
ASMJIT_NONCOPYABLE(ArenaPool)
struct Link { Link* next; };
Link* _data {};
ASMJIT_INLINE_NODEBUG ArenaPool() noexcept = default;
//! Resets the arena pool.
//!
//! The pool must be reset before the associated \ref Arena is reset or destroyed, as pooled chunks point into the arena's memory and become invalid together with it.
ASMJIT_INLINE_NODEBUG void reset() noexcept { _data = nullptr; }
//! Allocates memory of `Size` bytes (or reuses a previously pooled allocation).
[[nodiscard]]
ASMJIT_INLINE T* alloc(Arena& arena) noexcept {
Link* p = _data;
if (ASMJIT_UNLIKELY(p == nullptr)) {
return arena.alloc_oneshot<T>(Arena::aligned_size(Size));
}
_data = p->next;
return static_cast<T*>(static_cast<void*>(p));
}
//! Returns the previously allocated memory back to the pool so it can be reused.
ASMJIT_INLINE void release(T* ptr) noexcept {
ASMJIT_ASSERT(ptr != nullptr);
Link* p = reinterpret_cast<Link*>(ptr);
p->next = _data;
_data = p;
}
ASMJIT_INLINE size_t pooled_item_count() const noexcept {
size_t n = 0;
Link* p = _data;
while (p) {
n++;
p = p->next;
}
return n;
}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARENAPOOL_H_INCLUDED

src/asmjit/core/arenastring.h

@@ -3,19 +3,19 @@
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ZONESTRING_H_INCLUDED
#define ASMJIT_CORE_ZONESTRING_H_INCLUDED
#ifndef ASMJIT_CORE_ARENASTRING_H_INCLUDED
#define ASMJIT_CORE_ARENASTRING_H_INCLUDED
#include "../core/globals.h"
#include "../core/zone.h"
#include "../core/arena.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_zone
//! \addtogroup asmjit_support
//! \{
//! A helper class used by \ref ZoneString implementation.
struct ZoneStringBase {
//! A helper class used by \ref ArenaString implementation.
struct ArenaStringBase {
union {
struct {
uint32_t _size;
@@ -32,38 +32,38 @@ struct ZoneStringBase {
_external = nullptr;
}
Error setData(Zone* zone, uint32_t maxEmbeddedSize, const char* str, size_t size) noexcept {
Error set_data(Arena& arena, uint32_t max_embedded_size, const char* str, size_t size) noexcept {
if (size == SIZE_MAX)
size = strlen(str);
if (size <= maxEmbeddedSize) {
if (size <= max_embedded_size) {
memcpy(_embedded, str, size);
_embedded[size] = '\0';
}
else {
char* external = static_cast<char*>(zone->dup(str, size, true));
char* external = static_cast<char*>(arena.dup(str, size, true));
if (ASMJIT_UNLIKELY(!external))
return DebugUtils::errored(kErrorOutOfMemory);
return make_error(Error::kOutOfMemory);
_external = external;
}
_size = uint32_t(size);
return kErrorOk;
return Error::kOk;
}
};
//! A string template that can be zone allocated.
//! A string template that can be arena-allocated.
//!
//! Helps with creating strings that can be either statically allocated if they are small, or externally allocated
//! in case their size exceeds the limit. The `N` represents the size of the whole `ZoneString` structure, based on
//! in case their size exceeds the limit. The `N` represents the size of the whole `ArenaString` structure, based on
//! that size the maximum size of the internal buffer is determined.
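//!
//! A minimal usage sketch (a 16-byte `ArenaString` is an arbitrary choice):
//!
//! \code
//! Arena arena(4096);
//! ArenaString<16> name;
//!
//! // Short data stays in the embedded buffer; longer data is copied to the arena.
//! if (name.set_data(arena, "idx", SIZE_MAX) == Error::kOk) {
//!   bool inline_storage = name.is_embedded(); // true for short data.
//! }
//! \endcode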
template<size_t N>
class ZoneString {
class ArenaString {
public:
//! \name Constants
//! \{
static inline constexpr uint32_t kWholeSize = (N > sizeof(ZoneStringBase)) ? uint32_t(N) : uint32_t(sizeof(ZoneStringBase));
static inline constexpr uint32_t kWholeSize = (N > sizeof(ArenaStringBase)) ? uint32_t(N) : uint32_t(sizeof(ArenaStringBase));
static inline constexpr uint32_t kMaxEmbeddedSize = kWholeSize - 5;
//! \}
@@ -72,8 +72,8 @@ public:
//! \{
union {
ZoneStringBase _base;
char _wholeData[kWholeSize];
ArenaStringBase _base;
char _whole_data[kWholeSize];
};
//! \}
@@ -81,7 +81,7 @@ public:
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ZoneString() noexcept { reset(); }
ASMJIT_INLINE_NODEBUG ArenaString() noexcept { reset(); }
ASMJIT_INLINE_NODEBUG void reset() noexcept { _base.reset(); }
//! \}
@@ -91,7 +91,7 @@ public:
//! Tests whether the string is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _base._size == 0; }
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _base._size == 0; }
//! Returns the string data.
[[nodiscard]]
@@ -103,14 +103,14 @@ public:
//! Tests whether the string is embedded (i.e. not dynamically allocated).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isEmbedded() const noexcept { return _base._size <= kMaxEmbeddedSize; }
ASMJIT_INLINE_NODEBUG bool is_embedded() const noexcept { return _base._size <= kMaxEmbeddedSize; }
//! Copies a new `data` of the given `size` to the string.
//!
//! If the `size` exceeds the internal buffer the given `zone` will be used to duplicate the data, otherwise
//! If the `size` exceeds the internal buffer the given `arena` will be used to duplicate the data, otherwise
//! the internal buffer will be used as a storage.
ASMJIT_INLINE_NODEBUG Error setData(Zone* zone, const char* data, size_t size) noexcept {
return _base.setData(zone, kMaxEmbeddedSize, data, size);
ASMJIT_INLINE_NODEBUG Error set_data(Arena& arena, const char* data, size_t size) noexcept {
return _base.set_data(arena, kMaxEmbeddedSize, data, size);
}
//! \}
@@ -120,4 +120,4 @@ public:
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ZONESTRING_H_INCLUDED
#endif // ASMJIT_CORE_ARENASTRING_H_INCLUDED

src/asmjit/core/arenatree.cpp

@@ -4,26 +4,26 @@
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/arena.h"
#include "../core/arenatree.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonetree.h"
ASMJIT_BEGIN_NAMESPACE
// ZoneTreeBase - Tests
// ArenaTreeBase - Tests
// ====================
#if defined(ASMJIT_TEST)
template<typename NodeT>
struct ZoneRBUnit {
using Tree = ZoneTree<NodeT>;
struct ArenaRBUnit {
using Tree = ArenaTree<NodeT>;
static void verifyTree(Tree& tree) noexcept {
EXPECT_GT(checkHeight(static_cast<NodeT*>(tree._root)), 0);
static void verify_tree(Tree& tree) noexcept {
EXPECT_GT(check_height(static_cast<NodeT*>(tree._root)), 0);
}
// Check whether the Red-Black tree is valid.
static int checkHeight(NodeT* node) noexcept {
static int check_height(NodeT* node) noexcept {
if (!node) return 1;
NodeT* ln = node->left();
@@ -34,19 +34,19 @@ struct ZoneRBUnit {
EXPECT_TRUE(rn == nullptr || *rn > *node);
// Red violation.
EXPECT_TRUE(!node->isRed() || (!ZoneTreeNode::_isValidRed(ln) && !ZoneTreeNode::_isValidRed(rn)));
EXPECT_TRUE(!node->is_red() || (!ArenaTreeNode::_is_valid_red(ln) && !ArenaTreeNode::_is_valid_red(rn)));
// Black violation.
int lh = checkHeight(ln);
int rh = checkHeight(rn);
int lh = check_height(ln);
int rh = check_height(rn);
EXPECT_TRUE(!lh || !rh || lh == rh);
// Only count black links.
return (lh && rh) ? lh + !node->isRed() : 0;
return (lh && rh) ? lh + !node->is_red() : 0;
}
};
class MyRBNode : public ZoneTreeNodeT<MyRBNode> {
class MyRBNode : public ArenaTreeNodeT<MyRBNode> {
public:
ASMJIT_NONCOPYABLE(MyRBNode)
@@ -56,23 +56,23 @@ public:
inline bool operator<(const MyRBNode& other) const noexcept { return _key < other._key; }
inline bool operator>(const MyRBNode& other) const noexcept { return _key > other._key; }
inline bool operator<(uint32_t queryKey) const noexcept { return _key < queryKey; }
inline bool operator>(uint32_t queryKey) const noexcept { return _key > queryKey; }
inline bool operator<(uint32_t query_key) const noexcept { return _key < query_key; }
inline bool operator>(uint32_t query_key) const noexcept { return _key > query_key; }
uint32_t _key;
};
UNIT(zone_rbtree) {
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 10000;
UNIT(arena_rbtree) {
uint32_t kCount = BrokenAPI::has_arg("--quick") ? 1000 : 10000;
Zone zone(4096);
ZoneTree<MyRBNode> rbTree;
Arena arena(4096);
ArenaTree<MyRBNode> rb_tree;
uint32_t key;
INFO("Inserting %u elements to RBTree and validating each operation", unsigned(kCount));
for (key = 0; key < kCount; key++) {
rbTree.insert(zone.newT<MyRBNode>(key));
ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
rb_tree.insert(arena.new_oneshot<MyRBNode>(key));
ArenaRBUnit<MyRBNode>::verify_tree(rb_tree);
}
uint32_t count = kCount;
@@ -81,17 +81,17 @@ UNIT(zone_rbtree) {
MyRBNode* node;
for (key = 0; key < count; key++) {
node = rbTree.get(key);
node = rb_tree.get(key);
EXPECT_NOT_NULL(node);
EXPECT_EQ(node->_key, key);
}
node = rbTree.get(--count);
rbTree.remove(node);
ZoneRBUnit<MyRBNode>::verifyTree(rbTree);
node = rb_tree.get(--count);
rb_tree.remove(node);
ArenaRBUnit<MyRBNode>::verify_tree(rb_tree);
} while (count);
EXPECT_TRUE(rbTree.empty());
EXPECT_TRUE(rb_tree.is_empty());
}
#endif

src/asmjit/core/arenatree.h

@@ -0,0 +1,407 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARENATREE_H_INCLUDED
#define ASMJIT_CORE_ARENATREE_H_INCLUDED
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_support
//! \{
//! RB-Tree node.
//!
//! The node color is stored in the least significant bit of the `left` child pointer.
//!
//! WARNING: Always use accessors to access left and right children.
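//!
//! For example, with `kRedMask == 0x1` a red node whose left child lives at address 0x1000 stores
//! `0x1001` in `_tree_nodes[0]`; masking with `kPtrMask` recovers the pointer, and the low bit carries
//! the color (nodes are at least pointer-aligned, so this bit is otherwise always zero).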
class ArenaTreeNode {
public:
ASMJIT_NONCOPYABLE(ArenaTreeNode)
//! \name Constants
//! \{
static inline constexpr uintptr_t kRedMask = 0x1;
static inline constexpr uintptr_t kPtrMask = ~kRedMask;
//! \}
//! \name Members
//! \{
uintptr_t _tree_nodes[2] {};
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ArenaTreeNode() noexcept {}
//! \}
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is_red() const noexcept { return static_cast<bool>(_tree_nodes[0] & kRedMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool has_child(size_t i) const noexcept { return _tree_nodes[i] > kRedMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool has_left() const noexcept { return _tree_nodes[0] > kRedMask; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool has_right() const noexcept { return _tree_nodes[1] != 0; }
template<typename T = ArenaTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* child(size_t i) const noexcept { return static_cast<T*>(_get_child(i)); }
template<typename T = ArenaTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* left() const noexcept { return static_cast<T*>(_get_left()); }
template<typename T = ArenaTreeNode>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* right() const noexcept { return static_cast<T*>(_get_right()); }
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArenaTreeNode* _get_child(size_t i) const noexcept { return (ArenaTreeNode*)(_tree_nodes[i] & kPtrMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArenaTreeNode* _get_left() const noexcept { return (ArenaTreeNode*)(_tree_nodes[0] & kPtrMask); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArenaTreeNode* _get_right() const noexcept { return (ArenaTreeNode*)(_tree_nodes[1]); }
ASMJIT_INLINE_NODEBUG void _set_child(size_t i, ArenaTreeNode* node) noexcept { _tree_nodes[i] = (_tree_nodes[i] & kRedMask) | (uintptr_t)node; }
ASMJIT_INLINE_NODEBUG void _set_left(ArenaTreeNode* node) noexcept { _tree_nodes[0] = (_tree_nodes[0] & kRedMask) | (uintptr_t)node; }
ASMJIT_INLINE_NODEBUG void _set_right(ArenaTreeNode* node) noexcept { _tree_nodes[1] = (uintptr_t)node; }
ASMJIT_INLINE_NODEBUG void _make_red() noexcept { _tree_nodes[0] |= kRedMask; }
ASMJIT_INLINE_NODEBUG void _make_black() noexcept { _tree_nodes[0] &= kPtrMask; }
//! Tests whether the node is RED (RED node must be non-null and must have RED flag set).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool _is_valid_red(ArenaTreeNode* node) noexcept { return node && node->is_red(); }
//! \}
//! \endcond
};
//! RB-Tree node casted to `NodeT`.
template<typename NodeT>
class ArenaTreeNodeT : public ArenaTreeNode {
public:
ASMJIT_NONCOPYABLE(ArenaTreeNodeT)
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ArenaTreeNodeT() noexcept
: ArenaTreeNode() {}
//! \}
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* child(size_t i) const noexcept { return static_cast<NodeT*>(_get_child(i)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* left() const noexcept { return static_cast<NodeT*>(_get_left()); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* right() const noexcept { return static_cast<NodeT*>(_get_right()); }
//! \}
};
//! RB-Tree.
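//!
//! A usage sketch modeled on the unit test in arenatree.cpp (`MyRBNode` is an illustrative node type
//! that provides `<` and `>` comparisons against both nodes and keys):
//!
//! \code
//! Arena arena(4096);
//! ArenaTree<MyRBNode> tree;
//! uint32_t key = 42;
//!
//! tree.insert(arena.new_oneshot<MyRBNode>(key));
//! MyRBNode* node = tree.get(key); // Descends via the node's comparison operators.
//! if (node) {
//!   tree.remove(node);
//! }
//! \endcode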
template<typename NodeT>
class ArenaTree {
public:
ASMJIT_NONCOPYABLE(ArenaTree)
using Node = NodeT;
NodeT* _root {};
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG ArenaTree() noexcept {}
ASMJIT_INLINE_NODEBUG ArenaTree(ArenaTree&& other) noexcept
: _root(other._root) {}
ASMJIT_INLINE_NODEBUG void reset() noexcept { _root = nullptr; }
//! \}
//! \name Accessors
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _root == nullptr; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG NodeT* root() const noexcept { return static_cast<NodeT*>(_root); }
//! \}
//! \name Utilities
//! \{
ASMJIT_INLINE_NODEBUG void swap(ArenaTree& other) noexcept {
std::swap(_root, other._root);
}
template<typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
void insert(NodeT* ASMJIT_NONNULL(node), const CompareT& cmp = CompareT()) noexcept {
// Node to insert must not contain garbage.
ASMJIT_ASSERT(!node->has_left());
ASMJIT_ASSERT(!node->has_right());
ASMJIT_ASSERT(!node->is_red());
if (!_root) {
_root = node;
return;
}
ArenaTreeNode head; // False root node,
head._set_right(_root); // having root on the right.
ArenaTreeNode* g = nullptr; // Grandparent.
ArenaTreeNode* p = nullptr; // Parent.
ArenaTreeNode* t = &head; // Iterator.
ArenaTreeNode* q = _root; // Query.
size_t dir = 0; // Direction for accessing child nodes.
size_t last = 0; // Not needed to initialize, but makes some tools happy.
node->_make_red(); // New nodes are always red and violations fixed appropriately.
// Search down the tree.
for (;;) {
if (!q) {
// Insert new node at the bottom.
q = node;
p->_set_child(dir, node);
}
else if (_is_valid_red(q->_get_left()) && _is_valid_red(q->_get_right())) {
// Color flip.
q->_make_red();
q->_get_left()->_make_black();
q->_get_right()->_make_black();
}
// Fix red violation.
if (_is_valid_red(q) && _is_valid_red(p)) {
ASMJIT_ASSUME(g != nullptr);
ASMJIT_ASSUME(p != nullptr);
t->_set_child(t->_get_right() == g,
q == p->_get_child(last) ? _single_rotate(g, !last) : _double_rotate(g, !last));
}
// Stop if found.
if (q == node) {
break;
}
last = dir;
dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
// Update helpers.
if (g) {
t = g;
}
g = p;
p = q;
q = q->_get_child(dir);
}
// Update root and make it black.
_root = static_cast<NodeT*>(head._get_right());
_root->_make_black();
}
//! Remove node from RBTree.
template<typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
void remove(ArenaTreeNode* ASMJIT_NONNULL(node), const CompareT& cmp = CompareT()) noexcept {
ArenaTreeNode head; // False root node,
head._set_right(_root); // having root on the right.
ArenaTreeNode* g = nullptr; // Grandparent.
ArenaTreeNode* p = nullptr; // Parent.
ArenaTreeNode* q = &head; // Query.
ArenaTreeNode* f = nullptr; // Found item.
ArenaTreeNode* gf = nullptr; // Found grandparent.
size_t dir = 1; // Direction (0 or 1).
// Search and push a red down.
while (q->has_child(dir)) {
size_t last = dir;
// Update helpers.
g = p;
p = q;
q = q->_get_child(dir);
dir = cmp(*static_cast<NodeT*>(q), *static_cast<NodeT*>(node)) < 0;
// Save found node.
if (q == node) {
f = q;
gf = g;
}
// Push the red node down.
if (!_is_valid_red(q) && !_is_valid_red(q->_get_child(dir))) {
if (_is_valid_red(q->_get_child(!dir))) {
ArenaTreeNode* child = _single_rotate(q, dir);
p->_set_child(last, child);
p = child;
}
else if (!_is_valid_red(q->_get_child(!dir)) && p->_get_child(!last)) {
ArenaTreeNode* s = p->_get_child(!last);
if (!_is_valid_red(s->_get_child(!last)) && !_is_valid_red(s->_get_child(last))) {
// Color flip.
p->_make_black();
s->_make_red();
q->_make_red();
}
else {
ASMJIT_ASSUME(g != nullptr);
ASMJIT_ASSUME(s != nullptr);
size_t dir2 = g->_get_right() == p;
ArenaTreeNode* child = g->_get_child(dir2);
if (_is_valid_red(s->_get_child(last))) {
child = _double_rotate(p, last);
g->_set_child(dir2, child);
}
else if (_is_valid_red(s->_get_child(!last))) {
child = _single_rotate(p, last);
g->_set_child(dir2, child);
}
// Ensure correct coloring.
q->_make_red();
child->_make_red();
child->_get_left()->_make_black();
child->_get_right()->_make_black();
}
}
}
}
// Replace and remove.
ASMJIT_ASSERT(f != nullptr);
ASMJIT_ASSERT(f != &head);
ASMJIT_ASSERT(q != &head);
p->_set_child(p->_get_right() == q,
q->_get_child(q->_get_left() == nullptr));
// NOTE: The original algorithm used a trick to just copy 'key/value' to `f` and mark `q` for deletion. But this
// is unacceptable here as we really want to destroy the passed `node`. So, we have to make sure that we have
// really removed `f` and not `q`.
if (f != q) {
ASMJIT_ASSERT(f != &head);
ASMJIT_ASSERT(f != gf);
ArenaTreeNode* n = gf ? gf : &head;
dir = (n == &head) ? 1 : cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
for (;;) {
if (n->_get_child(dir) == f) {
n->_set_child(dir, q);
// RAW copy, including the color.
q->_tree_nodes[0] = f->_tree_nodes[0];
q->_tree_nodes[1] = f->_tree_nodes[1];
break;
}
n = n->_get_child(dir);
// `n` cannot become null here as the loop is guaranteed to reach `f` in a few iterations.
ASMJIT_ASSERT(n != nullptr);
dir = cmp(*static_cast<NodeT*>(n), *static_cast<NodeT*>(node)) < 0;
}
}
// Update root and make it black.
_root = static_cast<NodeT*>(head._get_right());
if (_root) {
_root->_make_black();
}
}
template<typename KeyT, typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
[[nodiscard]]
inline NodeT* get(const KeyT& key, const CompareT& cmp = CompareT()) const noexcept {
ArenaTreeNode* node = _root;
while (node) {
auto result = cmp(*static_cast<const NodeT*>(node), key);
if (result == 0) {
break;
}
// Go left or right depending on the `result`.
node = node->_get_child(result < 0);
}
return static_cast<NodeT*>(node);
}
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
static inline bool _is_valid_red(ArenaTreeNode* node) noexcept { return ArenaTreeNode::_is_valid_red(node); }
//! Single rotation.
static inline ArenaTreeNode* _single_rotate(ArenaTreeNode* ASMJIT_NONNULL(root), size_t dir) noexcept {
ArenaTreeNode* save = root->_get_child(!dir);
ASMJIT_ASSUME(save != nullptr);
ArenaTreeNode* save_child = save->_get_child(dir);
root->_set_child(!dir, save_child);
save->_set_child( dir, root);
root->_make_red();
save->_make_black();
return save;
}
//! Double rotation.
static inline ArenaTreeNode* _double_rotate(ArenaTreeNode* ASMJIT_NONNULL(root), size_t dir) noexcept {
ArenaTreeNode* child = root->_get_child(!dir);
ASMJIT_ASSUME(child != nullptr);
root->_set_child(!dir, _single_rotate(child, !dir));
return _single_rotate(root, dir);
}
//! \}
//! \endcond
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARENATREE_H_INCLUDED

src/asmjit/core/arenavector.cpp

@@ -0,0 +1,293 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#include "../core/api-build_p.h"
#include "../core/support.h"
#include "../core/arena.h"
#include "../core/arenavector.h"
ASMJIT_BEGIN_NAMESPACE
// ArenaVector - Memory Management
// ===============================
// Rule based growing strategy - grow in small power-of-two steps (4, 16, 64, and 256 bytes), then grow
// exponentially until `Globals::kGrowThreshold` is reached.
static constexpr uint8_t ArenaVector_grow_rule(uint8_t log2_size) noexcept {
return log2_size < 1u ? uint8_t(0) :
log2_size < 2u ? uint8_t(2) :
log2_size < 4u ? uint8_t(4) :
log2_size < 6u ? uint8_t(6) :
log2_size < 8u ? uint8_t(8) : uint8_t(log2_size);
}
// The table is never used fully, only indices up to `ctz(Globals::kGrowThreshold) + 1`.
static constexpr uint8_t ArenaVector_grow_table[32] = {
ArenaVector_grow_rule( 0), ArenaVector_grow_rule( 1), ArenaVector_grow_rule( 2), ArenaVector_grow_rule( 3),
ArenaVector_grow_rule( 4), ArenaVector_grow_rule( 5), ArenaVector_grow_rule( 6), ArenaVector_grow_rule( 7),
ArenaVector_grow_rule( 8), ArenaVector_grow_rule( 9), ArenaVector_grow_rule(10), ArenaVector_grow_rule(11),
ArenaVector_grow_rule(12), ArenaVector_grow_rule(13), ArenaVector_grow_rule(14), ArenaVector_grow_rule(15),
ArenaVector_grow_rule(16), ArenaVector_grow_rule(17), ArenaVector_grow_rule(18), ArenaVector_grow_rule(19),
ArenaVector_grow_rule(20), ArenaVector_grow_rule(21), ArenaVector_grow_rule(22), ArenaVector_grow_rule(23),
ArenaVector_grow_rule(24), ArenaVector_grow_rule(25), ArenaVector_grow_rule(26), ArenaVector_grow_rule(27),
ArenaVector_grow_rule(28), ArenaVector_grow_rule(29), ArenaVector_grow_rule(30), ArenaVector_grow_rule(31)
};
static ASMJIT_INLINE size_t ArenaVector_expand_byte_size(size_t byte_size) noexcept {
ASMJIT_ASSERT(byte_size > 0u);
if (ASMJIT_LIKELY(byte_size <= Globals::kGrowThreshold)) {
uint32_t grow_table_idx = Support::bit_size_of<size_t> - Support::clz((byte_size - 1u) | 1u);
uint32_t grow_log2_size = ArenaVector_grow_table[grow_table_idx];
return size_t(1) << grow_log2_size;
}
else {
return Support::align_up(size_t(byte_size) + 1u, Globals::kGrowThreshold);
}
}
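// A worked example (on a 64-bit target): a request of 24 bytes yields grow_table_idx == 5, which the
// table rounds up to 1 << 6 == 64 bytes; once a request exceeds `Globals::kGrowThreshold`, growth
// becomes linear in `kGrowThreshold`-sized steps instead of doubling.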
template<typename ItemSize>
static ASMJIT_NOINLINE Error ArenaVector_reserve_with_byte_size(ArenaVectorBase& self, Arena& arena, size_t byte_size, ItemSize item_size) noexcept {
size_t allocated_size;
uint8_t* new_data = static_cast<uint8_t*>(arena.alloc_reusable(byte_size, Out(allocated_size)));
if (ASMJIT_UNLIKELY(!new_data)) {
return make_error(Error::kOutOfMemory);
}
size_t allocated_capacity = Support::item_count_from_byte_size(allocated_size, item_size);
void* old_data = self._data;
uint32_t size = self._size;
if (old_data) {
memcpy(new_data, old_data, Support::byte_size_from_item_count(size, item_size));
arena.free_reusable(old_data, Support::byte_size_from_item_count(self._capacity, item_size));
}
self._data = new_data;
self._capacity = uint32_t(allocated_capacity);
return Error::kOk;
}
static ASMJIT_INLINE bool ArenaVector_is_valid_size(size_t size) noexcept {
if constexpr (sizeof(size_t) < 8u) {
// 32-bit machine - `uint32_t` is the same as `size_t` - there is no need to do any checks
// as it's impossible to end up having a container, which data uses the whole address space.
return true;
}
else {
// 64-bit machine - since we store size and capacity as `uint32_t`, we have to check whether
// the `size_t` argument actually fits `uint32_t`.
return size < size_t(0xFFFFFFFFu);
}
}
static ASMJIT_INLINE bool ArenaVector_check_byte_size(uint64_t byte_size) noexcept {
if constexpr (sizeof(size_t) < 8u) {
// 32-bit machine.
return byte_size <= 0x80000000u;
}
else {
return true;
}
}
template<typename ItemSize>
static ASMJIT_INLINE Error ArenaVector_reserve_fit(ArenaVectorBase& self, Arena& arena, size_t item_count, ItemSize item_size) noexcept {
size_t capacity = self._capacity;
size_t capacity_masked = capacity | Support::bool_as_mask<size_t>(!ArenaVector_is_valid_size(item_count));
uint64_t byte_size = Support::byte_size_from_item_count<uint64_t>(item_count, item_size);
if (ASMJIT_UNLIKELY(Support::bool_or(capacity_masked >= item_count, !ArenaVector_check_byte_size(byte_size)))) {
return capacity >= item_count ? Error::kOk : make_error(Error::kOutOfMemory);
}
return ArenaVector_reserve_with_byte_size(self, arena, size_t(byte_size), item_size);
}
template<typename ItemSize>
static ASMJIT_INLINE Error ArenaVector_reserve_grow(ArenaVectorBase& self, Arena& arena, size_t item_count, ItemSize item_size) noexcept {
size_t capacity = self._capacity;
size_t capacity_masked = capacity | Support::bool_as_mask<size_t>(!ArenaVector_is_valid_size(item_count));
uint64_t byte_size = Support::byte_size_from_item_count<uint64_t>(item_count, item_size);
if (ASMJIT_UNLIKELY(Support::bool_or(capacity_masked >= item_count, !ArenaVector_check_byte_size(byte_size)))) {
return capacity >= item_count ? Error::kOk : make_error(Error::kOutOfMemory);
}
size_t expanded_byte_size = ArenaVector_expand_byte_size(size_t(byte_size));
return ArenaVector_reserve_with_byte_size(self, arena, expanded_byte_size, item_size);
}
template<typename ItemSize>
static ASMJIT_INLINE Error ArenaVector_grow(ArenaVectorBase& self, Arena& arena, size_t n, ItemSize item_size) noexcept {
Support::FastUInt8 of {};
size_t after = Support::add_overflow<size_t>(self._size, n, &of);
if (ASMJIT_UNLIKELY(of)) {
return make_error(Error::kOutOfMemory);
}
return ArenaVector_reserve_grow(self, arena, after, item_size);
}
template<typename ItemSize>
static ASMJIT_INLINE Error ArenaVector_resize_fit(ArenaVectorBase& self, Arena& arena, size_t n, ItemSize item_size) noexcept {
size_t size = self._size;
size_t capacity = self._capacity;
if (capacity < n) {
ASMJIT_PROPAGATE(ArenaVector_reserve_fit(self, arena, n, item_size));
}
if (size < n) {
memset(static_cast<uint8_t*>(self._data) + Support::byte_size_from_item_count(size, item_size), 0, Support::byte_size_from_item_count(n - size, item_size));
}
self._size = uint32_t(n);
return Error::kOk;
}
template<typename ItemSize>
static ASMJIT_INLINE Error ArenaVector_resize_grow(ArenaVectorBase& self, Arena& arena, size_t n, ItemSize item_size) noexcept {
size_t size = self._size;
size_t capacity = self._capacity;
if (capacity < n) {
ASMJIT_PROPAGATE(ArenaVector_reserve_grow(self, arena, n, item_size));
}
if (size < n) {
memset(static_cast<uint8_t*>(self._data) + Support::byte_size_from_item_count(size, item_size), 0, Support::byte_size_from_item_count(n - size, item_size));
}
self._size = uint32_t(n);
return Error::kOk;
}
// Public API wrappers:
Error ArenaVectorBase::_reserve_fit(Arena& arena, size_t n, Support::ByteSize item_size) noexcept {
return ArenaVector_reserve_fit<Support::ByteSize>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_reserve_fit(Arena& arena, size_t n, Support::Log2Size item_size) noexcept {
return ArenaVector_reserve_fit<Support::Log2Size>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_reserve_grow(Arena& arena, size_t n, Support::ByteSize item_size) noexcept {
return ArenaVector_reserve_grow<Support::ByteSize>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_reserve_grow(Arena& arena, size_t n, Support::Log2Size item_size) noexcept {
return ArenaVector_reserve_grow<Support::Log2Size>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_reserve_additional(Arena& arena, size_t n, Support::ByteSize item_size) noexcept {
return ArenaVector_grow<Support::ByteSize>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_reserve_additional(Arena& arena, size_t n, Support::Log2Size item_size) noexcept {
return ArenaVector_grow<Support::Log2Size>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_resize_fit(Arena& arena, size_t n, Support::ByteSize item_size) noexcept {
return ArenaVector_resize_fit<Support::ByteSize>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_resize_fit(Arena& arena, size_t n, Support::Log2Size item_size) noexcept {
return ArenaVector_resize_fit<Support::Log2Size>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_resize_grow(Arena& arena, size_t n, Support::ByteSize item_size) noexcept {
return ArenaVector_resize_grow<Support::ByteSize>(*this, arena, n, item_size);
}
Error ArenaVectorBase::_resize_grow(Arena& arena, size_t n, Support::Log2Size item_size) noexcept {
return ArenaVector_resize_grow<Support::Log2Size>(*this, arena, n, item_size);
}
// ArenaVector - Tests
// ===================
#if defined(ASMJIT_TEST)
template<typename T>
static void test_arena_vector(Arena& arena, const char* type_name) {
constexpr uint32_t kMiB = 1024 * 1024;
size_t i;
size_t kMax = 100000;
ArenaVector<T> vec;
INFO("ArenaVector<%s> basic tests", type_name);
EXPECT_EQ(vec.append(arena, 0), Error::kOk);
EXPECT_FALSE(vec.is_empty());
EXPECT_EQ(vec.size(), 1u);
EXPECT_GE(vec.capacity(), 1u);
EXPECT_EQ(vec.index_of(0), size_t(0));
EXPECT_TRUE(Globals::is_npos(vec.index_of(-11)));
vec.clear();
EXPECT_TRUE(vec.is_empty());
EXPECT_EQ(vec.size(), 0u);
EXPECT_TRUE(Globals::is_npos(vec.index_of(0)));
for (i = 0; i < kMax; i++) {
EXPECT_EQ(vec.append(arena, T(i)), Error::kOk);
}
EXPECT_FALSE(vec.is_empty());
EXPECT_EQ(vec.size(), size_t(kMax));
EXPECT_EQ(vec.index_of(T(0)), size_t(0));
EXPECT_EQ(vec.index_of(T(kMax - 1)), size_t(kMax - 1));
EXPECT_EQ(vec.begin()[0], 0);
EXPECT_EQ(vec.end()[-1], T(kMax - 1));
INFO("ArenaVector<%s>::operator=(ArenaVector<%s>&&)", type_name, type_name);
ArenaVector<T> moved_vec(std::move(vec));
EXPECT_EQ(vec.data(), nullptr);
EXPECT_EQ(vec.size(), 0u);
EXPECT_EQ(vec.capacity(), 0u);
moved_vec.release(arena);
INFO("ArenaVector<%s>::reserve_grow()", type_name);
for (uint32_t j = 8; j < 40 / sizeof(T); j += 8) {
EXPECT_EQ(vec.reserve_grow(arena, j * kMiB), Error::kOk);
EXPECT_GE(vec.capacity(), j * kMiB);
}
}
template<typename T>
static void test_arena_vector_capacity(Arena& arena, const char* type_name) {
ArenaVector<T> vec;
INFO("ArenaVector<%s> capacity (growing) test", type_name);
for (size_t i = 0; i < 10000000; i++) {
size_t old_capacity = vec.capacity();
EXPECT_EQ(vec.append(arena, T(i)), Error::kOk);
if (vec.capacity() != old_capacity) {
INFO(" Increasing capacity from %zu to %zu (vector size=%zu)\n", old_capacity, vec.capacity(), vec.size());
}
}
}
UNIT(arena_vector, -1) {
Arena arena(8192);
test_arena_vector<int32_t>(arena, "int32_t");
test_arena_vector_capacity<int32_t>(arena, "int32_t");
test_arena_vector<int64_t>(arena, "int64_t");
test_arena_vector_capacity<int64_t>(arena, "int64_t");
}
#endif
ASMJIT_END_NAMESPACE

src/asmjit/core/arenavector.h

@@ -0,0 +1,625 @@
// This file is part of AsmJit project <https://asmjit.com>
//
// See <asmjit/core.h> or LICENSE.md for license and copyright information
// SPDX-License-Identifier: Zlib
#ifndef ASMJIT_CORE_ARENAVECTOR_H_INCLUDED
#define ASMJIT_CORE_ARENAVECTOR_H_INCLUDED
#include "../core/arena.h"
#include "../core/span.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_support
//! \{
//! Base class used by \ref ArenaVector template.
class ArenaVectorBase {
public:
ASMJIT_NONCOPYABLE(ArenaVectorBase)
//! \name Types (C++ compatibility)
//! \{
using size_type = size_t;
using difference_type = ptrdiff_t;
//! \}
//! \name Members
//! \{
//! Vector data (untyped).
void* _data {};
//! Size of the vector.
uint32_t _size {};
//! Capacity of the vector.
uint32_t _capacity {};
//! \}
protected:
//! \name Construction & Destruction
//! \{
//! Creates a new instance of `ArenaVectorBase`.
ASMJIT_INLINE_NODEBUG ArenaVectorBase() noexcept {}
ASMJIT_INLINE_NODEBUG ArenaVectorBase(ArenaVectorBase&& other) noexcept
: _data(other._data),
_size(other._size),
_capacity(other._capacity) { other.reset(); }
//! \}
//! \cond INTERNAL
//! \name Internal
//! \{
inline void _release(Arena& arena, uint32_t item_byte_size) noexcept {
if (_data != nullptr) {
arena.free_reusable(_data, _capacity * item_byte_size);
reset();
}
}
ASMJIT_INLINE_NODEBUG void _move_from(ArenaVectorBase&& other) noexcept {
void* data = other._data;
uint32_t size = other._size;
uint32_t capacity = other._capacity;
other._data = nullptr;
other._size = 0u;
other._capacity = 0u;
_data = data;
_size = size;
_capacity = capacity;
}
ASMJIT_API Error _reserve_fit(Arena& arena, size_t n, Support::ByteSize byte_size) noexcept;
ASMJIT_API Error _reserve_fit(Arena& arena, size_t n, Support::Log2Size log2_size) noexcept;
ASMJIT_API Error _reserve_grow(Arena& arena, size_t n, Support::ByteSize byte_size) noexcept;
ASMJIT_API Error _reserve_grow(Arena& arena, size_t n, Support::Log2Size log2_size) noexcept;
ASMJIT_API Error _reserve_additional(Arena& arena, size_t n, Support::ByteSize byte_size) noexcept;
ASMJIT_API Error _reserve_additional(Arena& arena, size_t n, Support::Log2Size log2_size) noexcept;
ASMJIT_API Error _resize_fit(Arena& arena, size_t n, Support::ByteSize byte_size) noexcept;
ASMJIT_API Error _resize_fit(Arena& arena, size_t n, Support::Log2Size log2_size) noexcept;
ASMJIT_API Error _resize_grow(Arena& arena, size_t n, Support::ByteSize byte_size) noexcept;
ASMJIT_API Error _resize_grow(Arena& arena, size_t n, Support::Log2Size log2_size) noexcept;
ASMJIT_INLINE_NODEBUG void _swap(ArenaVectorBase& other) noexcept {
std::swap(_data, other._data);
std::swap(_size, other._size);
std::swap(_capacity, other._capacity);
}
//! \}
//! \endcond
public:
//! \name Accessors
//! \{
//! Tests whether the vector is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _size == 0; }
//! Returns the vector size.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
//! Returns the vector capacity.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t capacity() const noexcept { return _capacity; }
//! \}
//! \name Utilities
//! \{
//! Makes the vector empty (won't change the capacity or data pointer).
ASMJIT_INLINE_NODEBUG void clear() noexcept { _size = 0u; }
//! Resets the vector data and sets its `size` and `capacity` to zero.
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_data = nullptr;
_size = 0;
_capacity = 0;
}
//! Truncates the vector to at most `n` items.
ASMJIT_INLINE_NODEBUG void truncate(size_t n) noexcept {
_size = uint32_t(Support::min<size_t>(_size, n));
}
//! Sets the size of the vector to `n`. Used internally by some algorithms.
inline void _set_size(size_t n) noexcept {
ASMJIT_ASSERT(n <= _capacity);
_size = uint32_t(n);
}
//! \}
};
//! Template used to store and manage an array of \ref Arena allocated data.
//!
//! This template has the following advantages over std::vector<>:
//! - Always non-copyable (by design, as copies are never needed).
//! - Optimized for working only with POD types.
//! - Explicit allocation - \ref Arena is not part of the data for improved memory footprint.
template <typename T>
class ArenaVector : public ArenaVectorBase {
public:
ASMJIT_NONCOPYABLE(ArenaVector)
//! \name Types (C++ compatibility)
//! \{
using value_type = T;
using pointer = T*;
using const_pointer = const T*;
using reference = T&;
using const_reference = const T&;
using iterator = T*;
using const_iterator = const T*;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a default constructed ArenaVector (data pointer is null, and both size and capacity are zero).
ASMJIT_INLINE_NODEBUG ArenaVector() noexcept : ArenaVectorBase() {}
//! Moves an existing vector into this instance and resets the `other` instance.
ASMJIT_INLINE_NODEBUG ArenaVector(ArenaVector&& other) noexcept : ArenaVectorBase(std::move(other)) {}
//! \}
//! \name Overloaded Operators
//! \{
//! Implements a move assignment operator. The `other` instance is reset after its content is moved to this instance.
//!
//! \note It's recommended to first release the memory of the destination vector as there is no way
//! to do it after the move, unless it's guaranteed that the destination vector is default constructed.
ASMJIT_INLINE_NODEBUG ArenaVector& operator=(ArenaVector&& other) noexcept {
_move_from(other);
return *this;
}
//! Returns item at index `i`.
[[nodiscard]]
ASMJIT_INLINE T& operator[](size_t i) noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
}
//! Returns item at index `i`.
[[nodiscard]]
ASMJIT_INLINE const T& operator[](size_t i) const noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
}
//! Returns a non-owning span of this vector.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG operator Span<T>() const noexcept { return Span<T>(static_cast<T*>(_data), _size); }
//! \}
//! \name Accessors
//! \{
//! Returns a non-owning span of this vector.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Span<T> as_span() const noexcept { return Span<T>(static_cast<T*>(_data), _size); }
//! Returns vector data (mutable).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG T* data() noexcept { return static_cast<T*>(_data); }
//! Returns vector data (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* data() const noexcept { return static_cast<const T*>(_data); }
//! Returns vector data (const).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const T* cdata() const noexcept { return static_cast<const T*>(_data); }
//! Returns item at the given index `i` (const).
[[nodiscard]]
inline const T& at(size_t i) const noexcept {
ASMJIT_ASSERT(i < _size);
return data()[i];
}
inline void _set_end(T* p) noexcept {
ASMJIT_ASSERT(p >= data() && p <= data() + _capacity);
_set_size(size_t(p - data()));
}
//! Returns a reference to the first element of the vector.
//!
//! \note The vector must have at least one element. Attempting to use `first()` on an empty vector will trigger
//! an assertion failure in debug builds.
[[nodiscard]]
ASMJIT_INLINE T& first() noexcept { return operator[](0); }
//! \overload
[[nodiscard]]
ASMJIT_INLINE const T& first() const noexcept { return operator[](0); }
//! Returns a reference to the last element of the vector.
//!
//! \note The vector must have at least one element. Attempting to use `last()` on an empty vector will trigger
//! an assertion failure in debug builds.
[[nodiscard]]
ASMJIT_INLINE T& last() noexcept { return operator[](_size - 1); }
//! \overload
[[nodiscard]]
ASMJIT_INLINE const T& last() const noexcept { return operator[](_size - 1); }
//! \}
//! \name C++ Compatibility (Iterators)
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG iterator begin() noexcept { return iterator(data()); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator begin() const noexcept { return const_iterator(data()); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG iterator end() noexcept { return iterator(data() + _size); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator end() const noexcept { return const_iterator(data() + _size); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator cbegin() const noexcept { return const_iterator(data()); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const_iterator cend() const noexcept { return const_iterator(data() + _size); }
//! \}
//! \name Iteration
//! \{
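//! Returns an adaptor for forward iteration over the vector's items.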
ASMJIT_INLINE_NODEBUG SpanForwardIteratorAdaptor<T> iterate() const noexcept {
T* p = static_cast<T*>(_data);
return SpanForwardIteratorAdaptor<T>{p, p + _size};
}
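//! Returns an adaptor for reverse iteration over the vector's items.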
ASMJIT_INLINE_NODEBUG SpanReverseIteratorAdaptor<T> iterate_reverse() const noexcept {
T* p = static_cast<T*>(_data);
return SpanReverseIteratorAdaptor<T>{p, p + _size};
}
//! \}
//! \name Utilities
//! \{
//! Swaps this vector with `other`.
ASMJIT_INLINE void swap(ArenaVector<T>& other) noexcept { _swap(other); }
//! Prepends `item` to the vector.
ASMJIT_INLINE Error prepend(Arena& arena, const T& item) noexcept {
ASMJIT_PROPAGATE(reserve_additional(arena));
memmove(static_cast<void*>(static_cast<T*>(_data) + 1),
static_cast<const void*>(_data),
size_t(_size) * sizeof(T));
memcpy(static_cast<void*>(_data),
static_cast<const void*>(&item),
sizeof(T));
_size++;
return Error::kOk;
}
//! Inserts an `item` at the specified `index`.
ASMJIT_INLINE Error insert(Arena& arena, size_t index, const T& item) noexcept {
ASMJIT_ASSERT(index <= _size);
ASMJIT_PROPAGATE(reserve_additional(arena));
T* dst = static_cast<T*>(_data) + index;
memmove(static_cast<void*>(dst + 1),
static_cast<const void*>(dst),
size_t(_size - index) * sizeof(T));
memcpy(static_cast<void*>(dst),
static_cast<const void*>(&item),
sizeof(T));
_size++;
return Error::kOk;
}
//! Appends `item` to the vector.
ASMJIT_INLINE Error append(Arena& arena, const T& item) noexcept {
ASMJIT_PROPAGATE(reserve_additional(arena));
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
static_cast<const void*>(&item),
sizeof(T));
_size++;
return Error::kOk;
}
//! Appends `other` vector at the end of this vector.
ASMJIT_INLINE Error concat(Arena& arena, const ArenaVector<T>& other) noexcept {
uint32_t size = other._size;
if (_capacity - _size < size) {
ASMJIT_PROPAGATE(reserve_additional(arena, size));
}
if (size) {
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
static_cast<const void*>(other._data),
size_t(size) * sizeof(T));
_size += size;
}
return Error::kOk;
}
ASMJIT_INLINE void assign_unchecked(const ArenaVector<T>& other) noexcept {
uint32_t size = other._size;
ASMJIT_ASSERT(_capacity >= other._size);
if (size) {
memcpy(_data, other._data, size_t(size) * sizeof(T));
}
_size = size;
}
//! Prepends `item` to the vector (unsafe case).
//!
//! Can only be used together with `reserve_additional()`. If `reserve_additional(N)` returns `Error::kOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_INLINE void prepend_unchecked(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
T* data = static_cast<T*>(_data);
if (_size) {
memmove(static_cast<void*>(data + 1),
static_cast<const void*>(data),
size_t(_size) * sizeof(T));
}
memcpy(static_cast<void*>(data),
static_cast<const void*>(&item),
sizeof(T));
_size++;
}
//! Appends `item` to the vector (unsafe case).
//!
//! Can only be used together with `reserve_additional()`. If `reserve_additional(N)` returns `Error::kOk` then N elements
//! can be added to the vector without checking if there is a place for them. Used mostly internally.
ASMJIT_INLINE void append_unchecked(const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
static_cast<const void*>(&item),
sizeof(T));
_size++;
}
//! Inserts an `item` at the specified `index` (unsafe case).
ASMJIT_INLINE void insert_unchecked(size_t index, const T& item) noexcept {
ASMJIT_ASSERT(_size < _capacity);
ASMJIT_ASSERT(index <= _size);
T* dst = static_cast<T*>(_data) + index;
memmove(static_cast<void*>(dst + 1),
static_cast<const void*>(dst),
size_t(_size - index) * sizeof(T));
memcpy(static_cast<void*>(dst),
static_cast<const void*>(&item),
sizeof(T));
_size++;
}
//! Concatenates all items of `other` at the end of the vector.
ASMJIT_INLINE void concat_unchecked(const ArenaVector<T>& other) noexcept {
uint32_t size = other._size;
ASMJIT_ASSERT(_capacity - _size >= size);
if (size) {
memcpy(static_cast<void*>(static_cast<T*>(_data) + _size),
static_cast<const void*>(other._data),
size_t(size) * sizeof(T));
_size += size;
}
}
//! Removes item at index `i`.
ASMJIT_INLINE void remove_at(size_t i) noexcept {
ASMJIT_ASSERT(i < _size);
T* data = static_cast<T*>(_data) + i;
size_t size = --_size - i;
if (size) {
memmove(static_cast<void*>(data),
static_cast<const void*>(data + 1),
size_t(size) * sizeof(T));
}
}
//! Pops the last element from the vector and returns it.
[[nodiscard]]
ASMJIT_INLINE T pop() noexcept {
ASMJIT_ASSERT(_size > 0);
uint32_t index = --_size;
return data()[index];
}
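//! Sorts the vector items using the given compare functor `cmp` (ascending by default).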
template<typename CompareT = Support::Compare<Support::SortOrder::kAscending>>
ASMJIT_INLINE void sort(const CompareT& cmp = CompareT()) noexcept {
Support::sort<T, CompareT>(data(), size(), cmp);
}
//! \}
//! \name Utility Functions
//! \{
//! Tests whether the vector contains `value`.
template<typename Value>
ASMJIT_INLINE bool contains(Value&& value) const noexcept {
return as_span().contains(std::forward<Value>(value));
}
//! Returns the first index of the given `value` or `Globals::kNPos` if it wasn't found.
template<typename Value>
ASMJIT_INLINE size_t index_of(Value&& value) const noexcept {
return as_span().index_of(std::forward<Value>(value));
}
//! Returns the last index of the given `value` or `Globals::kNPos` if it wasn't found.
template<typename Value>
ASMJIT_INLINE size_t last_index_of(Value&& value) const noexcept {
return as_span().last_index_of(std::forward<Value>(value));
}
//! \}
//! \cond INTERNAL
//! \name Memory Management (internal)
//! \{
ASMJIT_INLINE Error _reserve_fit(Arena& arena, size_t n) noexcept {
return ArenaVectorBase::_reserve_fit(arena, n, Support::as_item_size<sizeof(T)>());
}
ASMJIT_INLINE Error _reserve_grow(Arena& arena, size_t n) noexcept {
return ArenaVectorBase::_reserve_grow(arena, n, Support::as_item_size<sizeof(T)>());
}
ASMJIT_INLINE Error _resize_fit(Arena& arena, size_t n) noexcept {
return ArenaVectorBase::_resize_fit(arena, n, Support::as_item_size<sizeof(T)>());
}
ASMJIT_INLINE Error _resize_grow(Arena& arena, size_t n) noexcept {
return ArenaVectorBase::_resize_grow(arena, n, Support::as_item_size<sizeof(T)>());
}
ASMJIT_INLINE Error _reserve_additional(Arena& arena, size_t n) noexcept {
return ArenaVectorBase::_reserve_additional(arena, n, Support::as_item_size<sizeof(T)>());
}
//! \}
//! \endcond
//! \name Memory Management
//! \{
//! Releases the memory held by `ArenaVector<T>` back to the `arena`.
ASMJIT_INLINE void release(Arena& arena) noexcept {
_release(arena, sizeof(T));
}
//! Reallocates the underlying array to fit at least `n` items with fit semantics.
//!
//! \remarks This function uses a fit policy, which means that when possible the underlying array is
//! allocated to hold exactly `n` elements, and otherwise the resulting capacity is only slightly higher.
[[nodiscard]]
ASMJIT_INLINE Error reserve_fit(Arena& arena, size_t n) noexcept {
if (ASMJIT_UNLIKELY(n > _capacity)) {
return _reserve_fit(arena, n);
}
else {
return Error::kOk;
}
}
//! Reallocates the underlying array to fit at least `n` items with grow semantics.
//!
//! If the vector is smaller than `n` the same growing calculations will be used as if `n` items were appended
//! to an empty vector, which means reserving additional space for more append operations that could follow.
[[nodiscard]]
inline Error reserve_grow(Arena& arena, size_t n) noexcept {
if (ASMJIT_UNLIKELY(n > _capacity)) {
return _reserve_grow(arena, n);
}
else {
return Error::kOk;
}
}
//! Called to grow the buffer so it can hold at least one more element.
[[nodiscard]]
ASMJIT_INLINE Error reserve_additional(Arena& arena) noexcept {
if (ASMJIT_UNLIKELY(_size == _capacity)) {
return _reserve_additional(arena, 1u);
}
else {
return Error::kOk;
}
}
//! Called to grow the buffer so it can hold at least `n` more elements.
[[nodiscard]]
ASMJIT_INLINE Error reserve_additional(Arena& arena, size_t n) noexcept {
if (ASMJIT_UNLIKELY(_capacity - _size < n)) {
return _reserve_additional(arena, n);
}
else {
return Error::kOk;
}
}
//! Resizes the vector to hold `n` elements with fit semantics.
//!
//! If `n` is greater than the current size then the additional elements' content will be initialized to zero.
//! If `n` is less than the current size then the vector will be truncated to exactly `n` elements.
[[nodiscard]]
ASMJIT_INLINE Error resize_fit(Arena& arena, size_t n) noexcept {
return _resize_fit(arena, n);
}
//! Resizes the vector to hold `n` elements with grow semantics.
//!
//! If `n` is greater than the current size then the additional elements' content will be initialized to zero.
//! If `n` is less than the current size then the vector will be truncated to exactly `n` elements.
[[nodiscard]]
ASMJIT_INLINE Error resize_grow(Arena& arena, size_t n) noexcept {
return _resize_grow(arena, n);
}
//! \}
};
//! \}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_CORE_ARENAVECTOR_H_INCLUDED
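
To make the reserve_additional() + *_unchecked() contract documented above concrete, here is a minimal usage sketch. It assumes only the interfaces declared in this header plus Support::maybe_unused(), which appears elsewhere in this commit:

static Error append_three(Arena& arena, ArenaVector<uint32_t>& vec) noexcept {
  // One capacity check up-front: if reserve_additional(N) returns Error::kOk,
  // N unchecked appends are guaranteed to fit.
  ASMJIT_PROPAGATE(vec.reserve_additional(arena, 3u));

  vec.append_unchecked(1u);
  vec.append_unchecked(2u);
  vec.append_unchecked(3u);

  // as_span() hands out a non-owning view (data + size), so callers never
  // see the vector's internals.
  Span<uint32_t> span = vec.as_span();
  uint32_t sum = 0;
  for (size_t i = 0; i < span.size(); i++) {
    sum += span.data()[i];
  }
  Support::maybe_unused(sum);
  return Error::kOk;
}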


@@ -25,18 +25,18 @@ BaseAssembler::~BaseAssembler() noexcept {}
// BaseAssembler - Buffer Management
// =================================
Error BaseAssembler::setOffset(size_t offset) {
Error BaseAssembler::set_offset(size_t offset) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
return report_error(make_error(Error::kNotInitialized));
}
size_t size = Support::max<size_t>(_section->bufferSize(), this->offset());
size_t size = Support::max<size_t>(_section->buffer_size(), this->offset());
if (ASMJIT_UNLIKELY(offset > size)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
return report_error(make_error(Error::kInvalidArgument));
}
_bufferPtr = _bufferData + offset;
return kErrorOk;
_buffer_ptr = _buffer_data + offset;
return Error::kOk;
}
// BaseAssembler - Section Management
@@ -46,25 +46,25 @@ static ASMJIT_INLINE Error BaseAssembler_initSection(BaseAssembler* self, Sectio
uint8_t* p = section->_buffer._data;
self->_section = section;
self->_bufferData = p;
self->_bufferPtr = p + section->_buffer._size;
self->_bufferEnd = p + section->_buffer._capacity;
self->_buffer_data = p;
self->_buffer_ptr = p + section->_buffer._size;
self->_buffer_end = p + section->_buffer._capacity;
return kErrorOk;
return Error::kOk;
}
Error BaseAssembler::section(Section* section) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
return report_error(make_error(Error::kNotInitialized));
}
if (!_code->isSectionValid(section->sectionId()) || _code->_sections[section->sectionId()] != section) {
return reportError(DebugUtils::errored(kErrorInvalidSection));
if (!_code->is_section_valid(section->section_id()) || _code->_sections[section->section_id()] != section) {
return report_error(make_error(Error::kInvalidSection));
}
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
_logger->logf(".section %s {#%u}\n", section->name(), section->sectionId());
_logger->logf(".section %s {#%u}\n", section->name(), section->section_id());
}
#endif
@@ -74,30 +74,30 @@ Error BaseAssembler::section(Section* section) {
// BaseAssembler - Label Management
// ================================
Label BaseAssembler::newLabel() {
Label BaseAssembler::new_label() {
Label label;
if (ASMJIT_LIKELY(_code)) {
Error err = _code->newLabelId(&label._baseId);
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
Error err = _code->new_label_id(Out(label._base_id));
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
report_error(err);
}
}
return label;
}
Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
Label BaseAssembler::new_named_label(const char* name, size_t name_size, LabelType type, uint32_t parent_id) {
Label label;
if (ASMJIT_LIKELY(_code)) {
uint32_t labelId;
Error err = _code->newNamedLabelId(&labelId, name, nameSize, type, parentId);
if (ASMJIT_UNLIKELY(err)) {
reportError(err);
uint32_t label_id;
Error err = _code->new_named_label_id(Out(label_id), name, name_size, type, parent_id);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
report_error(err);
}
else {
label.setId(labelId);
label.set_id(label_id);
}
}
@@ -106,99 +106,99 @@ Label BaseAssembler::newNamedLabel(const char* name, size_t nameSize, LabelType
Error BaseAssembler::bind(const Label& label) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
return report_error(make_error(Error::kNotInitialized));
}
Error err = _code->bindLabel(label, _section->sectionId(), offset());
Error err = _code->bind_label(label, _section->section_id(), offset());
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
EmitterUtils::logLabelBound(this, label);
EmitterUtils::log_label_bound(this, label);
}
#endif
resetInlineComment();
if (err) {
return reportError(err);
reset_inline_comment();
if (err != Error::kOk) {
return report_error(err);
}
return kErrorOk;
return Error::kOk;
}
// BaseAssembler - Embed
// =====================
Error BaseAssembler::embed(const void* data, size_t dataSize) {
Error BaseAssembler::embed(const void* data, size_t data_size) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
return report_error(make_error(Error::kNotInitialized));
}
if (dataSize == 0) {
return kErrorOk;
if (data_size == 0) {
return Error::kOk;
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
ASMJIT_PROPAGATE(writer.ensure_space(this, data_size));
writer.emitData(data, dataSize);
writer.emit_data(data, data_size);
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), TypeId::kUInt8, data, dataSize, 1);
Formatter::format_data(sb, _logger->flags(), arch(), TypeId::kUInt8, data, data_size, 1);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
return Error::kOk;
}
Error BaseAssembler::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
uint32_t deabstractDelta = TypeUtils::deabstractDeltaOfSize(registerSize());
TypeId finalTypeId = TypeUtils::deabstract(typeId, deabstractDelta);
Error BaseAssembler::embed_data_array(TypeId type_id, const void* data, size_t item_count, size_t repeat_count) {
uint32_t deabstract_delta = TypeUtils::deabstract_delta_of_size(register_size());
TypeId final_type_id = TypeUtils::deabstract(type_id, deabstract_delta);
if (ASMJIT_UNLIKELY(!TypeUtils::isValid(finalTypeId))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (ASMJIT_UNLIKELY(!TypeUtils::is_valid(final_type_id))) {
return report_error(make_error(Error::kInvalidArgument));
}
if (itemCount == 0 || repeatCount == 0) {
return kErrorOk;
if (item_count == 0 || repeat_count == 0) {
return Error::kOk;
}
uint32_t typeSize = TypeUtils::sizeOf(finalTypeId);
uint32_t type_size = TypeUtils::size_of(final_type_id);
Support::FastUInt8 of = 0;
size_t dataSize = Support::mulOverflow(itemCount, size_t(typeSize), &of);
size_t totalSize = Support::mulOverflow(dataSize, repeatCount, &of);
size_t data_size = Support::mul_overflow(item_count, size_t(type_size), &of);
size_t total_size = Support::mul_overflow(data_size, repeat_count, &of);
if (ASMJIT_UNLIKELY(of)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
return report_error(make_error(Error::kOutOfMemory));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, totalSize));
ASMJIT_PROPAGATE(writer.ensure_space(this, total_size));
for (size_t i = 0; i < repeatCount; i++) {
writer.emitData(data, dataSize);
for (size_t i = 0; i < repeat_count; i++) {
writer.emit_data(data, data_size);
}
writer.done(this);
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), typeId, data, itemCount, repeatCount);
Formatter::format_data(sb, _logger->flags(), arch(), type_id, data, item_count, repeat_count);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
return Error::kOk;
}
#ifndef ASMJIT_NO_LOGGING
static const TypeId dataTypeIdBySize[9] = {
static const TypeId data_type_id_by_size_table[9] = {
TypeId::kVoid, // [0] (invalid)
TypeId::kUInt8, // [1] (uint8_t)
TypeId::kUInt16, // [2] (uint16_t)
@@ -211,13 +211,13 @@ static const TypeId dataTypeIdBySize[9] = {
};
#endif
Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
Error BaseAssembler::embed_const_pool(const Label& label, const ConstPool& pool) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
return report_error(make_error(Error::kNotInitialized));
}
if (ASMJIT_UNLIKELY(!isLabelValid(label))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (ASMJIT_UNLIKELY(!is_label_valid(label))) {
return report_error(make_error(Error::kInvalidLabel));
}
ASMJIT_PROPAGATE(align(AlignMode::kData, uint32_t(pool.alignment())));
@@ -225,11 +225,11 @@ Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
size_t size = pool.size();
if (!size) {
return kErrorOk;
return Error::kOk;
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, size));
ASMJIT_PROPAGATE(writer.ensure_space(this, size));
#ifndef ASMJIT_NO_LOGGING
uint8_t* data = writer.cursor();
@@ -241,166 +241,166 @@ Error BaseAssembler::embedConstPool(const Label& label, const ConstPool& pool) {
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
uint32_t dataSizeLog2 = Support::min<uint32_t>(Support::ctz(pool.minItemSize()), 3);
uint32_t dataSize = 1 << dataSizeLog2;
uint32_t data_size_log2 = Support::min<uint32_t>(Support::ctz(pool.min_item_size()), 3);
uint32_t data_size = 1 << data_size_log2;
StringTmp<512> sb;
Formatter::formatData(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize], data, size >> dataSizeLog2);
Formatter::format_data(sb, _logger->flags(), arch(), data_type_id_by_size_table[data_size], data, size >> data_size_log2);
sb.append('\n');
_logger->log(sb);
}
#endif
return kErrorOk;
return Error::kOk;
}
Error BaseAssembler::embedLabel(const Label& label, size_t dataSize) {
Error BaseAssembler::embed_label(const Label& label, size_t data_size) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
return report_error(make_error(Error::kNotInitialized));
}
if (ASMJIT_UNLIKELY(!isLabelValid(label))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (ASMJIT_UNLIKELY(!is_label_valid(label))) {
return report_error(make_error(Error::kInvalidLabel));
}
RelocEntry* re;
LabelEntry& le = _code->labelEntry(label);
LabelEntry& le = _code->label_entry_of(label);
if (dataSize == 0) {
dataSize = registerSize();
if (data_size == 0) {
data_size = register_size();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2UpTo(dataSize, 8u))) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
if (ASMJIT_UNLIKELY(!Support::is_power_of_2_up_to(data_size, 8u))) {
return report_error(make_error(Error::kInvalidOperandSize));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
ASMJIT_PROPAGATE(writer.ensure_space(this, data_size));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
Formatter::format_data_type(sb, _logger->flags(), arch(), data_type_id_by_size_table[data_size]);
sb.append(' ');
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
Formatter::format_label(sb, FormatFlags::kNone, this, label.id());
sb.append('\n');
_logger->log(sb);
}
#endif
Error err = _code->newRelocEntry(&re, RelocType::kRelToAbs);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
Error err = _code->new_reloc_entry(Out(re), RelocType::kRelToAbs);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
re->_sourceSectionId = _section->sectionId();
re->_sourceOffset = offset();
re->_format.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
re->_source_section_id = _section->section_id();
re->_source_offset = offset();
re->_format.reset_to_simple_value(OffsetType::kUnsignedOffset, data_size);
if (le.isBound()) {
re->_targetSectionId = le.sectionId();
if (le.is_bound()) {
re->_target_section_id = le.section_id();
re->_payload = le.offset();
}
else {
OffsetFormat of;
of.resetToSimpleValue(OffsetType::kUnsignedOffset, dataSize);
of.reset_to_simple_value(OffsetType::kUnsignedOffset, data_size);
Fixup* fixup = _code->newFixup(le, _section->sectionId(), offset(), 0, of);
Fixup* fixup = _code->new_fixup(le, _section->section_id(), offset(), 0, of);
if (ASMJIT_UNLIKELY(!fixup)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
return report_error(make_error(Error::kOutOfMemory));
}
fixup->labelOrRelocId = re->id();
fixup->label_or_reloc_id = re->id();
}
// Emit dummy DWORD/QWORD depending on the data size.
writer.emitZeros(dataSize);
writer.emit_zeros(data_size);
writer.done(this);
return kErrorOk;
return Error::kOk;
}
Error BaseAssembler::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
Error BaseAssembler::embed_label_delta(const Label& label, const Label& base, size_t data_size) {
if (ASMJIT_UNLIKELY(!_code)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
return report_error(make_error(Error::kNotInitialized));
}
if (ASMJIT_UNLIKELY(!Support::bool_and(_code->isLabelValid(label), _code->isLabelValid(base)))) {
return reportError(DebugUtils::errored(kErrorInvalidLabel));
if (ASMJIT_UNLIKELY(!Support::bool_and(_code->is_label_valid(label), _code->is_label_valid(base)))) {
return report_error(make_error(Error::kInvalidLabel));
}
LabelEntry& labelEntry = _code->labelEntry(label);
LabelEntry& baseEntry = _code->labelEntry(base);
LabelEntry& label_entry = _code->label_entry_of(label);
LabelEntry& base_entry = _code->label_entry_of(base);
if (dataSize == 0) {
dataSize = registerSize();
if (data_size == 0) {
data_size = register_size();
}
if (ASMJIT_UNLIKELY(!Support::isPowerOf2UpTo(dataSize, 8u))) {
return reportError(DebugUtils::errored(kErrorInvalidOperandSize));
if (ASMJIT_UNLIKELY(!Support::is_power_of_2_up_to(data_size, 8u))) {
return report_error(make_error(Error::kInvalidOperandSize));
}
CodeWriter writer(this);
ASMJIT_PROPAGATE(writer.ensureSpace(this, dataSize));
ASMJIT_PROPAGATE(writer.ensure_space(this, data_size));
#ifndef ASMJIT_NO_LOGGING
if (_logger) {
StringTmp<256> sb;
sb.append('.');
Formatter::formatDataType(sb, _logger->flags(), arch(), dataTypeIdBySize[dataSize]);
Formatter::format_data_type(sb, _logger->flags(), arch(), data_type_id_by_size_table[data_size]);
sb.append(" (");
Formatter::formatLabel(sb, FormatFlags::kNone, this, label.id());
Formatter::format_label(sb, FormatFlags::kNone, this, label.id());
sb.append(" - ");
Formatter::formatLabel(sb, FormatFlags::kNone, this, base.id());
Formatter::format_label(sb, FormatFlags::kNone, this, base.id());
sb.append(")\n");
_logger->log(sb);
}
#endif
// If both labels are bound within the same section it means the delta can be calculated now.
if (labelEntry.isBound() && baseEntry.isBound() && labelEntry.sectionId() == baseEntry.sectionId()) {
uint64_t delta = labelEntry.offset() - baseEntry.offset();
writer.emitValueLE(delta, dataSize);
if (label_entry.is_bound() && base_entry.is_bound() && label_entry.section_id() == base_entry.section_id()) {
uint64_t delta = label_entry.offset() - base_entry.offset();
writer.emit_value_le(delta, data_size);
}
else {
RelocEntry* re;
Error err = _code->newRelocEntry(&re, RelocType::kExpression);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
Error err = _code->new_reloc_entry(Out(re), RelocType::kExpression);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
Expression* exp = _code->_zone.newT<Expression>();
Expression* exp = _code->_arena.new_oneshot<Expression>();
if (ASMJIT_UNLIKELY(!exp)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
return report_error(make_error(Error::kOutOfMemory));
}
exp->reset();
exp->opType = ExpressionOpType::kSub;
exp->setValueAsLabelId(0, label.id());
exp->setValueAsLabelId(1, base.id());
exp->op_type = ExpressionOpType::kSub;
exp->set_value_as_label_id(0, label.id());
exp->set_value_as_label_id(1, base.id());
re->_format.resetToSimpleValue(OffsetType::kSignedOffset, dataSize);
re->_sourceSectionId = _section->sectionId();
re->_sourceOffset = offset();
re->_format.reset_to_simple_value(OffsetType::kSignedOffset, data_size);
re->_source_section_id = _section->section_id();
re->_source_offset = offset();
re->_payload = (uint64_t)(uintptr_t)exp;
writer.emitZeros(dataSize);
writer.emit_zeros(data_size);
}
writer.done(this);
return kErrorOk;
return Error::kOk;
}
// BaseAssembler - Comment
// =======================
Error BaseAssembler::comment(const char* data, size_t size) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (!has_emitter_flag(EmitterFlags::kLogComments)) {
if (!has_emitter_flag(EmitterFlags::kAttached)) {
return report_error(make_error(Error::kNotInitialized));
}
return kErrorOk;
return Error::kOk;
}
#ifndef ASMJIT_NO_LOGGING
@@ -409,34 +409,34 @@ Error BaseAssembler::comment(const char* data, size_t size) {
_logger->log(data, size);
_logger->log("\n", 1);
return kErrorOk;
return Error::kOk;
#else
DebugUtils::unused(data, size);
return kErrorOk;
Support::maybe_unused(data, size);
return Error::kOk;
#endif
}
// BaseAssembler - Events
// ======================
Error BaseAssembler::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error BaseAssembler::on_attach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::on_attach(code));
// Attach to the end of the .text section.
return BaseAssembler_initSection(this, code._sections[0]);
}
Error BaseAssembler::onDetach(CodeHolder& code) noexcept {
Error BaseAssembler::on_detach(CodeHolder& code) noexcept {
_section = nullptr;
_bufferData = nullptr;
_bufferEnd = nullptr;
_bufferPtr = nullptr;
return Base::onDetach(code);
_buffer_data = nullptr;
_buffer_end = nullptr;
_buffer_ptr = nullptr;
return Base::on_detach(code);
}
Error BaseAssembler::onReinit(CodeHolder& code) noexcept {
// BaseEmitter::onReinit() never fails.
(void)Base::onReinit(code);
Error BaseAssembler::on_reinit(CodeHolder& code) noexcept {
// BaseEmitter::on_reinit() never fails.
(void)Base::on_reinit(code);
return BaseAssembler_initSection(this, code._sections[0]);
}
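
For orientation, the renamed label/embed API above composes like this. A minimal sketch assuming a JitRuntime `rt` (class names are unchanged by this commit, only function names are snake_case):

static Error emit_data_with_delta(JitRuntime& rt) noexcept {
  CodeHolder code;
  ASMJIT_PROPAGATE(code.init(rt.environment()));

  x86::Assembler a(&code);
  static const uint32_t values[4] = { 1, 2, 3, 4 };

  Label begin = a.new_label();
  Label end = a.new_named_label("end");

  ASMJIT_PROPAGATE(a.bind(begin));
  ASMJIT_PROPAGATE(a.embed_data_array(TypeId::kUInt32, values, 4));
  ASMJIT_PROPAGATE(a.bind(end));

  // Both labels are bound in the same section, so embed_label_delta() can
  // compute the 4-byte delta immediately instead of creating a relocation.
  return a.embed_label_delta(end, begin, 4);
}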


@@ -30,14 +30,19 @@ public:
ASMJIT_NONCOPYABLE(BaseAssembler)
using Base = BaseEmitter;
//! \name Members
//! \{
//! Current section where the assembling happens.
Section* _section = nullptr;
//! Start of the CodeBuffer of the current section.
uint8_t* _bufferData = nullptr;
uint8_t* _buffer_data = nullptr;
//! End (first invalid byte) of the current section.
uint8_t* _bufferEnd = nullptr;
uint8_t* _buffer_end = nullptr;
//! Pointer in the CodeBuffer of the current section.
uint8_t* _bufferPtr = nullptr;
uint8_t* _buffer_ptr = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
@@ -54,32 +59,32 @@ public:
//! Returns the capacity of the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t bufferCapacity() const noexcept { return (size_t)(_bufferEnd - _bufferData); }
ASMJIT_INLINE_NODEBUG size_t buffer_capacity() const noexcept { return (size_t)(_buffer_end - _buffer_data); }
//! Returns the number of remaining bytes in the current CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t remainingSpace() const noexcept { return (size_t)(_bufferEnd - _bufferPtr); }
ASMJIT_INLINE_NODEBUG size_t remaining_space() const noexcept { return (size_t)(_buffer_end - _buffer_ptr); }
//! Returns the current position in the CodeBuffer.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_bufferPtr - _bufferData); }
ASMJIT_INLINE_NODEBUG size_t offset() const noexcept { return (size_t)(_buffer_ptr - _buffer_data); }
//! Sets the current position in the CodeBuffer to `offset`.
//!
//! \note The `offset` cannot be greater than the buffer size even if it's within the buffer's capacity.
ASMJIT_API Error setOffset(size_t offset);
ASMJIT_API Error set_offset(size_t offset);
//! Returns the start of the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferData() const noexcept { return _bufferData; }
ASMJIT_INLINE_NODEBUG uint8_t* buffer_data() const noexcept { return _buffer_data; }
//! Returns the end (first invalid byte) in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferEnd() const noexcept { return _bufferEnd; }
ASMJIT_INLINE_NODEBUG uint8_t* buffer_end() const noexcept { return _buffer_end; }
//! Returns the current pointer in the CodeBuffer in the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* bufferPtr() const noexcept { return _bufferPtr; }
ASMJIT_INLINE_NODEBUG uint8_t* buffer_ptr() const noexcept { return _buffer_ptr; }
//! \}
@@ -88,7 +93,7 @@ public:
//! Returns the current section.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Section* currentSection() const noexcept { return _section; }
ASMJIT_INLINE_NODEBUG Section* current_section() const noexcept { return _section; }
ASMJIT_API Error section(Section* section) override;
@@ -97,8 +102,9 @@ public:
//! \name Label Management
//! \{
ASMJIT_API Label newLabel() override;
ASMJIT_API Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId) override;
ASMJIT_API Label new_label() override;
ASMJIT_API Label new_named_label(const char* name, size_t name_size = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parent_id = Globals::kInvalidId) override;
ASMJIT_API Error bind(const Label& label) override;
//! \}
@@ -106,12 +112,12 @@ public:
//! \name Embed
//! \{
ASMJIT_API Error embed(const void* data, size_t dataSize) override;
ASMJIT_API Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1) override;
ASMJIT_API Error embedConstPool(const Label& label, const ConstPool& pool) override;
ASMJIT_API Error embed(const void* data, size_t data_size) override;
ASMJIT_API Error embed_data_array(TypeId type_id, const void* data, size_t item_count, size_t repeat_count = 1) override;
ASMJIT_API Error embed_const_pool(const Label& label, const ConstPool& pool) override;
ASMJIT_API Error embedLabel(const Label& label, size_t dataSize = 0) override;
ASMJIT_API Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0) override;
ASMJIT_API Error embed_label(const Label& label, size_t data_size = 0) override;
ASMJIT_API Error embed_label_delta(const Label& label, const Label& base, size_t data_size = 0) override;
//! \}
@@ -125,9 +131,9 @@ public:
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error onReinit(CodeHolder& code) noexcept override;
ASMJIT_API Error on_attach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_detach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_reinit(CodeHolder& code) noexcept override;
//! \}
};
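
The set_offset() note above (the offset may not exceed the buffer size, even when within capacity) is what enables the usual emit-then-patch pattern. A hedged sketch using only methods declared in this header plus embed() from the base interface:

static Error patch_dword_later(BaseAssembler& a, uint32_t final_value) noexcept {
  // Emit a 4-byte placeholder and remember where it lives.
  uint32_t placeholder = 0;
  size_t patch_offset = a.offset();
  ASMJIT_PROPAGATE(a.embed(&placeholder, 4));

  // ... more code would be emitted here ...

  // Rewind to the placeholder, overwrite it, and restore the end position.
  // set_offset() succeeds because both offsets are within the buffer size.
  size_t end_offset = a.offset();
  ASMJIT_PROPAGATE(a.set_offset(patch_offset));
  ASMJIT_PROPAGATE(a.embed(&final_value, 4));
  return a.set_offset(end_offset);
}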

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -17,16 +17,16 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_builder
//! \{
static inline void BaseBuilder_assignInlineComment(BaseBuilder* self, BaseNode* node, const char* comment) noexcept {
static ASMJIT_INLINE void Builder_assign_inline_comment(BaseBuilder* self, BaseNode* node, const char* comment) noexcept {
if (comment) {
node->setInlineComment(static_cast<char*>(self->_codeZone.dup(comment, strlen(comment), true)));
node->set_inline_comment(static_cast<char*>(self->_builder_arena.dup(comment, strlen(comment), true)));
}
}
static inline void BaseBuilder_assignInstState(BaseBuilder* self, InstNode* node, const BaseEmitter::State& state) noexcept {
node->setOptions(state.options);
node->setExtraReg(state.extraReg);
BaseBuilder_assignInlineComment(self, node, state.comment);
static ASMJIT_INLINE void Builder_assign_inst_state(BaseBuilder* self, InstNode* node, const BaseEmitter::State& state) noexcept {
node->set_options(state.options);
node->set_extra_reg(state.extra_reg);
Builder_assign_inline_comment(self, node, state.comment);
}
//! \}


@@ -69,27 +69,27 @@ struct CodeBuffer {
//! Tests whether the code buffer has the given `flag` set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasFlag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); }
ASMJIT_INLINE_NODEBUG bool has_flag(CodeBufferFlags flag) const noexcept { return Support::test(_flags, flag); }
//! Tests whether this code buffer has a fixed size.
//!
//! Fixed size means that the code buffer cannot grow.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return hasFlag(CodeBufferFlags::kIsFixed); }
ASMJIT_INLINE_NODEBUG bool is_fixed() const noexcept { return has_flag(CodeBufferFlags::kIsFixed); }
//! Tests whether the data in this code buffer is external.
//!
//! External data can only be provided by users; it's never allocated by AsmJit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isExternal() const noexcept { return hasFlag(CodeBufferFlags::kIsExternal); }
ASMJIT_INLINE_NODEBUG bool is_external() const noexcept { return has_flag(CodeBufferFlags::kIsExternal); }
//! Tests whether the data in this code buffer is allocated (non-null).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAllocated() const noexcept { return _data != nullptr; }
ASMJIT_INLINE_NODEBUG bool is_allocated() const noexcept { return _data != nullptr; }
//! Tests whether the code buffer is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return !_size; }
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return !_size; }
//! Returns the size of the data.
[[nodiscard]]

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -6,65 +6,66 @@
#include "../core/api-build_p.h"
#include "../core/codeholder.h"
#include "../core/codewriter_p.h"
#include "../arm/armutils.h"
ASMJIT_BEGIN_NAMESPACE
bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t bitShift = format.immBitShift();
uint32_t discardLsb = format.immDiscardLsb();
bool CodeWriterUtils::encode_offset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bit_count = format.imm_bit_count();
uint32_t bit_shift = format.imm_bit_shift();
uint32_t discard_lsb = format.imm_discard_lsb();
// Invalid offset (should not happen).
if (!bitCount || bitCount > format.valueSize() * 8u) {
if (!bit_count || bit_count > format.value_size() * 8u) {
return false;
}
uint32_t value;
uint32_t u = 0;
bool unsignedLogic = format.type() == OffsetType::kUnsignedOffset;
bool unsigned_logic = format.type() == OffsetType::kUnsignedOffset;
// First handle all offsets that use an additional field for their sign, in which case the offset is
// encoded as its absolute value.
if (format.hasSignBit()) {
if (format.has_sign_bit()) {
u = uint32_t(offset64 >= 0);
if (u == 0) {
offset64 = -offset64;
}
unsignedLogic = true;
unsigned_logic = true;
}
// First handle all unsigned offset types.
if (unsignedLogic) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
if (unsigned_logic) {
if (discard_lsb) {
ASMJIT_ASSERT(discard_lsb <= 32);
if ((offset64 & Support::lsb_mask<uint32_t>(discard_lsb)) != 0) {
return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
offset64 = int64_t(uint64_t(offset64) >> discard_lsb);
}
value = uint32_t(offset64 & Support::lsbMask<uint32_t>(bitCount));
value = uint32_t(offset64 & Support::lsb_mask<uint32_t>(bit_count));
if (value != offset64) {
return false;
}
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
if (discard_lsb) {
ASMJIT_ASSERT(discard_lsb <= 32);
if ((offset64 & Support::lsb_mask<uint32_t>(discard_lsb)) != 0) {
return false;
}
offset64 >>= discardLsb;
offset64 >>= discard_lsb;
}
if (!Support::isInt32(offset64)) {
if (!Support::is_int_n<32>(offset64)) {
return false;
}
value = uint32_t(int32_t(offset64));
if (!Support::isEncodableOffset32(int32_t(value), bitCount)) {
if (!Support::is_encodable_offset_32(int32_t(value), bit_count)) {
return false;
}
}
@@ -72,14 +73,14 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
switch (format.type()) {
case OffsetType::kSignedOffset:
case OffsetType::kUnsignedOffset: {
*dst = (value & Support::lsbMask<uint32_t>(bitCount)) << bitShift;
*dst = (value & Support::lsb_mask<uint32_t>(bit_count)) << bit_shift;
return true;
}
// Opcode: {.....|imm:1|..N.N|......|imm:3|....|imm:8}
case OffsetType::kThumb32_ADR: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 12 || bitShift != 0) {
if (format.value_size() != 4 || bit_count != 12 || bit_shift != 0) {
return false;
}
@@ -101,7 +102,7 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// Opcode: {....|.|imm[23]|imm[20:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_B: {
// Sanity checks.
if (format.valueSize() != 4) {
if (format.value_size() != 4) {
return false;
}
@@ -118,7 +119,7 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
// Opcode: {....|.|imm[19]|....|imm[16:11]|..|ja|.|jb|imm[10:0]}
case OffsetType::kThumb32_BCond: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 20 || bitShift != 0) {
if (format.value_size() != 4 || bit_count != 20 || bit_shift != 0) {
return false;
}
@@ -133,57 +134,57 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
}
case OffsetType::kAArch32_ADR: {
uint32_t encodedImm;
if (!arm::Utils::encodeAArch32Imm(value, &encodedImm)) {
uint32_t encoded_imm;
if (!arm::Utils::encode_aarch32_imm(value, Out(encoded_imm))) {
return false;
}
*dst = (Support::bitMask(22) << u) | (encodedImm << bitShift);
*dst = (Support::bit_mask<uint32_t>(22) << u) | (encoded_imm << bit_shift);
return true;
}
case OffsetType::kAArch32_U23_SignedOffset: {
*dst = (value << bitShift) | (u << 23);
*dst = (value << bit_shift) | (u << 23);
return true;
}
case OffsetType::kAArch32_U23_0To3At0_4To7At8: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 8 || bitShift != 0) {
if (format.value_size() != 4 || bit_count != 8 || bit_shift != 0) {
return false;
}
uint32_t immLo = (value & 0x0Fu);
uint32_t immHi = (value & 0xF0u) << (8 - 4);
uint32_t imm_lo = (value & 0x0Fu);
uint32_t imm_hi = (value & 0xF0u) << (8 - 4);
*dst = immLo | immHi | (u << 23);
*dst = imm_lo | imm_hi | (u << 23);
return true;
}
case OffsetType::kAArch32_1To24At0_0At24: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 25 || bitShift != 0) {
if (format.value_size() != 4 || bit_count != 25 || bit_shift != 0) {
return false;
}
uint32_t immLo = (value & 0x0000001u) << 24;
uint32_t immHi = (value & 0x1FFFFFEu) >> 1;
uint32_t imm_lo = (value & 0x0000001u) << 24;
uint32_t imm_hi = (value & 0x1FFFFFEu) >> 1;
*dst = immLo | immHi;
*dst = imm_lo | imm_hi;
return true;
}
case OffsetType::kAArch64_ADR:
case OffsetType::kAArch64_ADRP: {
// Sanity checks.
if (format.valueSize() != 4 || bitCount != 21 || bitShift != 5) {
if (format.value_size() != 4 || bit_count != 21 || bit_shift != 5) {
return false;
}
uint32_t immLo = value & 0x3u;
uint32_t immHi = (value >> 2) & Support::lsbMask<uint32_t>(19);
uint32_t imm_lo = value & 0x3u;
uint32_t imm_hi = (value >> 2) & Support::lsb_mask<uint32_t>(19);
*dst = (immLo << 29) | (immHi << 5);
*dst = (imm_lo << 29) | (imm_hi << 5);
return true;
}
@@ -192,11 +193,11 @@ bool CodeWriterUtils::encodeOffset32(uint32_t* dst, int64_t offset64, const Offs
}
}
bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bitCount = format.immBitCount();
uint32_t discardLsb = format.immDiscardLsb();
bool CodeWriterUtils::encode_offset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept {
uint32_t bit_count = format.imm_bit_count();
uint32_t discard_lsb = format.imm_discard_lsb();
if (!bitCount || bitCount > format.valueSize() * 8u) {
if (!bit_count || bit_count > format.value_size() * 8u) {
return false;
}
@@ -204,30 +205,30 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
// First handle all unsigned offset types.
if (format.type() == OffsetType::kUnsignedOffset) {
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
if (discard_lsb) {
ASMJIT_ASSERT(discard_lsb <= 32);
if ((offset64 & Support::lsb_mask<uint32_t>(discard_lsb)) != 0) {
return false;
}
offset64 = int64_t(uint64_t(offset64) >> discardLsb);
offset64 = int64_t(uint64_t(offset64) >> discard_lsb);
}
value = uint64_t(offset64) & Support::lsbMask<uint64_t>(bitCount);
value = uint64_t(offset64) & Support::lsb_mask<uint64_t>(bit_count);
if (value != uint64_t(offset64)) {
return false;
}
}
else {
// The rest of OffsetType options are all signed.
if (discardLsb) {
ASMJIT_ASSERT(discardLsb <= 32);
if ((offset64 & Support::lsbMask<uint32_t>(discardLsb)) != 0) {
if (discard_lsb) {
ASMJIT_ASSERT(discard_lsb <= 32);
if ((offset64 & Support::lsb_mask<uint32_t>(discard_lsb)) != 0) {
return false;
}
offset64 >>= discardLsb;
offset64 >>= discard_lsb;
}
if (!Support::isEncodableOffset64(offset64, bitCount)) {
if (!Support::is_encodable_offset_64(offset64, bit_count)) {
return false;
}
@@ -237,7 +238,7 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
switch (format.type()) {
case OffsetType::kSignedOffset:
case OffsetType::kUnsignedOffset: {
*dst = (value & Support::lsbMask<uint64_t>(bitCount)) << format.immBitShift();
*dst = (value & Support::lsb_mask<uint64_t>(bit_count)) << format.imm_bit_shift();
return true;
}
@@ -246,15 +247,15 @@ bool CodeWriterUtils::encodeOffset64(uint64_t* dst, int64_t offset64, const Offs
}
}
bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept {
bool CodeWriterUtils::write_offset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept {
// Offset the destination by ValueOffset so that `dst` points to the
// patched word instead of to the beginning of the patched region.
dst = static_cast<char*>(dst) + format.valueOffset();
dst = static_cast<char*>(dst) + format.value_offset();
switch (format.valueSize()) {
switch (format.value_size()) {
case 1: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) {
if (!encode_offset32(&mask, offset64, format)) {
return false;
}
@@ -264,7 +265,7 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
case 2: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) {
if (!encode_offset32(&mask, offset64, format)) {
return false;
}
@@ -274,7 +275,7 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
case 4: {
uint32_t mask;
if (!encodeOffset32(&mask, offset64, format)) {
if (!encode_offset32(&mask, offset64, format)) {
return false;
}
@@ -284,7 +285,7 @@ bool CodeWriterUtils::writeOffset(void* dst, int64_t offset64, const OffsetForma
case 8: {
uint64_t mask;
if (!encodeOffset64(&mask, offset64, format)) {
if (!encode_offset64(&mask, offset64, format)) {
return false;
}
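
For the plain kSignedOffset/kUnsignedOffset cases handled above, the whole encoding reduces to dropping discard_lsb low bits, range-checking, and a mask-and-shift. A self-contained sketch of that arithmetic (standalone C++, not the helpers from this file):

#include <cstdint>

// Encodes `offset` into an instruction-word field of `bit_count` bits placed
// at `bit_shift`, after discarding `discard_lsb` low bits (which must be zero).
// Mirrors the signed-offset path of encode_offset32() above (bit_count <= 32).
static bool encode_simple_offset(uint32_t* dst, int64_t offset,
                                 uint32_t bit_count, uint32_t bit_shift,
                                 uint32_t discard_lsb) {
  uint32_t lsb_mask = (1u << discard_lsb) - 1u;
  if ((offset & lsb_mask) != 0) {
    return false;                 // Offset not aligned to the discarded bits.
  }
  offset >>= discard_lsb;

  // The encodable signed range is [-2^(bit_count-1), 2^(bit_count-1) - 1].
  int64_t min = -(int64_t(1) << (bit_count - 1));
  int64_t max = (int64_t(1) << (bit_count - 1)) - 1;
  if (offset < min || offset > max) {
    return false;
  }

  uint32_t field_mask = (bit_count >= 32) ? 0xFFFFFFFFu : ((1u << bit_count) - 1u);
  *dst = (uint32_t(offset) & field_mask) << bit_shift;
  return true;
}

For example, an AArch64 B instruction would use bit_count=26, bit_shift=0, and discard_lsb=2, since its 26-bit immediate encodes a 4-byte aligned offset.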


@@ -24,29 +24,30 @@ public:
uint8_t* _cursor;
ASMJIT_INLINE_NODEBUG explicit CodeWriter(BaseAssembler* a) noexcept
: _cursor(a->_bufferPtr) {}
: _cursor(a->_buffer_ptr) {}
[[nodiscard]]
ASMJIT_INLINE Error ensureSpace(BaseAssembler* a, size_t n) noexcept {
size_t remainingSpace = (size_t)(a->_bufferEnd - _cursor);
if (ASMJIT_UNLIKELY(remainingSpace < n)) {
ASMJIT_INLINE Error ensure_space(BaseAssembler* a, size_t n) noexcept {
size_t remaining_space = (size_t)(a->_buffer_end - _cursor);
if (ASMJIT_UNLIKELY(remaining_space < n)) {
CodeBuffer& buffer = a->_section->_buffer;
Error err = a->_code->growBuffer(&buffer, n);
if (ASMJIT_UNLIKELY(err))
return a->reportError(err);
_cursor = a->_bufferPtr;
Error err = a->_code->grow_buffer(&buffer, n);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return a->report_error(err);
}
return kErrorOk;
_cursor = a->_buffer_ptr;
}
return Error::kOk;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint8_t* cursor() const noexcept { return _cursor; }
ASMJIT_INLINE_NODEBUG void setCursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE_NODEBUG void set_cursor(uint8_t* cursor) noexcept { _cursor = cursor; }
ASMJIT_INLINE_NODEBUG void advance(size_t n) noexcept { _cursor += n; }
[[nodiscard]]
ASMJIT_INLINE size_t offsetFrom(uint8_t* from) const noexcept {
ASMJIT_INLINE size_t offset_from(uint8_t* from) const noexcept {
ASMJIT_ASSERT(_cursor >= from);
return (size_t)(_cursor - from);
}
@@ -59,7 +60,7 @@ public:
}
template<typename T, typename Y>
ASMJIT_INLINE void emit8If(T val, Y cond) noexcept {
ASMJIT_INLINE void emit8_if(T val, Y cond) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size_t(cond) <= 1u);
@@ -68,41 +69,41 @@ public:
}
template<typename T>
ASMJIT_INLINE void emit16uLE(T val) noexcept {
ASMJIT_INLINE void emit16u_le(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u16_le(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit16uBE(T val) noexcept {
ASMJIT_INLINE void emit16u_be(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u16_be(_cursor, uint16_t(U(val) & 0xFFFFu));
_cursor += 2;
}
template<typename T>
ASMJIT_INLINE void emit32uLE(T val) noexcept {
ASMJIT_INLINE void emit32u_le(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u32_le(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
template<typename T>
ASMJIT_INLINE void emit32uBE(T val) noexcept {
ASMJIT_INLINE void emit32u_be(T val) noexcept {
using U = std::make_unsigned_t<T>;
Support::storeu_u32_be(_cursor, uint32_t(U(val) & 0xFFFFFFFFu));
_cursor += 4;
}
ASMJIT_INLINE void emitData(const void* data, size_t size) noexcept {
ASMJIT_INLINE void emit_data(const void* data, size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memcpy(_cursor, data, size);
_cursor += size;
}
template<typename T>
ASMJIT_INLINE void emitValueLE(const T& value, size_t size) noexcept {
ASMJIT_INLINE void emit_value_le(const T& value, size_t size) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T));
@@ -115,7 +116,7 @@ public:
}
template<typename T>
ASMJIT_INLINE void emitValueBE(const T& value, size_t size) noexcept {
ASMJIT_INLINE void emit_value_be(const T& value, size_t size) noexcept {
using U = std::make_unsigned_t<T>;
ASMJIT_ASSERT(size <= sizeof(T));
@@ -127,7 +128,7 @@ public:
_cursor += size;
}
ASMJIT_INLINE void emitZeros(size_t size) noexcept {
ASMJIT_INLINE void emit_zeros(size_t size) noexcept {
ASMJIT_ASSERT(size != 0);
memset(_cursor, 0, size);
_cursor += size;
@@ -157,11 +158,11 @@ public:
ASMJIT_INLINE void done(BaseAssembler* a) noexcept {
CodeBuffer& buffer = a->_section->_buffer;
size_t newSize = (size_t)(_cursor - a->_bufferData);
ASMJIT_ASSERT(newSize <= buffer.capacity());
size_t new_size = (size_t)(_cursor - a->_buffer_data);
ASMJIT_ASSERT(new_size <= buffer.capacity());
a->_bufferPtr = _cursor;
buffer._size = Support::max(buffer._size, newSize);
a->_buffer_ptr = _cursor;
buffer._size = Support::max(buffer._size, new_size);
}
};
@@ -169,13 +170,13 @@ public:
namespace CodeWriterUtils {
[[nodiscard]]
bool encodeOffset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool encode_offset32(uint32_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool encodeOffset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool encode_offset64(uint64_t* dst, int64_t offset64, const OffsetFormat& format) noexcept;
[[nodiscard]]
bool writeOffset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
bool write_offset(void* dst, int64_t offset64, const OffsetFormat& format) noexcept;
} // {CodeWriterUtils}


@@ -27,21 +27,21 @@ class GlobalConstPoolPass : public Pass {
public:
using Base = Pass;
GlobalConstPoolPass() noexcept : Pass("GlobalConstPoolPass") {}
GlobalConstPoolPass(BaseCompiler& cc) noexcept : Pass(cc, "GlobalConstPoolPass") {}
Error run(Zone* zone, Logger* logger) override {
DebugUtils::unused(zone, logger);
Error run(Arena& arena, Logger* logger) override {
Support::maybe_unused(arena, logger);
// Flush the global constant pool.
BaseCompiler* compiler = static_cast<BaseCompiler*>(_cb);
ConstPoolNode* globalConstPool = compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)];
BaseCompiler& compiler = static_cast<BaseCompiler&>(_cb);
ConstPoolNode* global_const_pool = compiler._const_pools[uint32_t(ConstPoolScope::kGlobal)];
if (globalConstPool) {
compiler->addAfter(globalConstPool, compiler->lastNode());
compiler->_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
if (global_const_pool) {
compiler.add_after(global_const_pool, compiler.last_node());
compiler._const_pools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
}
return kErrorOk;
return Error::kOk;
}
};
@@ -51,336 +51,333 @@ public:
BaseCompiler::BaseCompiler() noexcept
: BaseBuilder(),
_func(nullptr),
_vRegArray(),
_constPools { nullptr, nullptr } {
_emitterType = EmitterType::kCompiler;
_validationFlags = ValidationFlags::kEnableVirtRegs;
_virt_regs(),
_const_pools { nullptr, nullptr } {
_emitter_type = EmitterType::kCompiler;
_validation_flags = ValidationFlags::kEnableVirtRegs;
}
BaseCompiler::~BaseCompiler() noexcept {}
// BaseCompiler - Function Management
// ==================================
Error BaseCompiler::newFuncNode(FuncNode** out, const FuncSignature& signature) {
Error BaseCompiler::new_func_node(Out<FuncNode*> out, const FuncSignature& signature) {
*out = nullptr;
// Create FuncNode together with all the required surrounding nodes.
FuncNode* funcNode = nullptr;
ASMJIT_PROPAGATE(_newNodeT<FuncNode>(&funcNode));
ASMJIT_PROPAGATE(newLabelNode(&funcNode->_exitNode));
ASMJIT_PROPAGATE(_newNodeT<SentinelNode>(&funcNode->_end, SentinelType::kFuncEnd));
FuncNode* func_node = nullptr;
ASMJIT_PROPAGATE(new_node_t<FuncNode>(Out(func_node)));
ASMJIT_PROPAGATE(new_label_node(Out(func_node->_exit_node)));
ASMJIT_PROPAGATE(new_node_t<SentinelNode>(Out(func_node->_end), SentinelType::kFuncEnd));
// Initialize the function's detail info.
Error err = funcNode->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
Error err = func_node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
// If the Target guarantees greater stack alignment than required by the calling convention,
// then override it, as that prevents having to perform dynamic stack alignment.
uint32_t environmentStackAlignment = _environment.stackAlignment();
uint32_t environment_stack_alignment = _environment.stack_alignment();
if (funcNode->_funcDetail._callConv.naturalStackAlignment() < environmentStackAlignment) {
funcNode->_funcDetail._callConv.setNaturalStackAlignment(environmentStackAlignment);
if (func_node->_func_detail._call_conv.natural_stack_alignment() < environment_stack_alignment) {
func_node->_func_detail._call_conv.set_natural_stack_alignment(environment_stack_alignment);
}
// Initialize the function frame.
err = funcNode->_frame.init(funcNode->_funcDetail);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
err = func_node->_frame.init(func_node->_func_detail);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
// Allocate space for function arguments.
funcNode->_args = nullptr;
if (funcNode->argCount() != 0) {
funcNode->_args = _codeZone.alloc<FuncNode::ArgPack>(funcNode->argCount() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!funcNode->_args)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
func_node->_args = nullptr;
if (func_node->arg_count() != 0) {
func_node->_args = _builder_arena.alloc_oneshot<FuncNode::ArgPack>(func_node->arg_count() * sizeof(FuncNode::ArgPack));
if (ASMJIT_UNLIKELY(!func_node->_args)) {
return report_error(make_error(Error::kOutOfMemory));
}
memset(funcNode->_args, 0, funcNode->argCount() * sizeof(FuncNode::ArgPack));
memset(func_node->_args, 0, func_node->arg_count() * sizeof(FuncNode::ArgPack));
}
ASMJIT_PROPAGATE(registerLabelNode(funcNode));
ASMJIT_PROPAGATE(register_label_node(func_node));
*out = funcNode;
return kErrorOk;
out = func_node;
return Error::kOk;
}
Error BaseCompiler::addFuncNode(FuncNode** out, const FuncSignature& signature) {
State state = _grabState();
Error BaseCompiler::add_func_node(Out<FuncNode*> out, const FuncSignature& signature) {
State state = _grab_state();
ASMJIT_PROPAGATE(newFuncNode(out, signature));
ASMJIT_ASSUME(*out != nullptr);
ASMJIT_PROPAGATE(new_func_node(out, signature));
Builder_assign_inline_comment(this, *out, state.comment);
BaseBuilder_assignInlineComment(this, *out, state.comment);
addFunc(*out);
return kErrorOk;
add_func(*out);
return Error::kOk;
}
Error BaseCompiler::newFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
uint32_t opCount = !o1.isNone() ? 2u : !o0.isNone() ? 1u : 0u;
Error BaseCompiler::new_func_ret_node(Out<FuncRetNode*> out, const Operand_& o0, const Operand_& o1) {
uint32_t op_count = !o1.is_none() ? 2u : !o0.is_none() ? 1u : 0u;
FuncRetNode* node = nullptr;
ASMJIT_PROPAGATE(_newNodeT<FuncRetNode>(&node));
ASMJIT_PROPAGATE(new_node_t<FuncRetNode>(Out(node)));
ASMJIT_ASSUME(node != nullptr);
node->setOpCount(opCount);
node->setOp(0, o0);
node->setOp(1, o1);
node->resetOpRange(2, node->opCapacity());
node->set_op_count(op_count);
node->set_op(0, o0);
node->set_op(1, o1);
node->reset_op_range(2, node->op_capacity());
*out = node;
return kErrorOk;
out = node;
return Error::kOk;
}
Error BaseCompiler::addFuncRetNode(FuncRetNode** out, const Operand_& o0, const Operand_& o1) {
State state = _grabState();
Error BaseCompiler::add_func_ret_node(Out<FuncRetNode*> out, const Operand_& o0, const Operand_& o1) {
State state = _grab_state();
ASMJIT_PROPAGATE(newFuncRetNode(out, o0, o1));
ASMJIT_ASSUME(*out != nullptr);
ASMJIT_PROPAGATE(new_func_ret_node(out, o0, o1));
Builder_assign_inline_comment(this, *out, state.comment);
BaseBuilder_assignInlineComment(this, *out, state.comment);
addNode(*out);
return kErrorOk;
add_node(*out);
return Error::kOk;
}
FuncNode* BaseCompiler::addFunc(FuncNode* func) {
FuncNode* BaseCompiler::add_func(FuncNode* func) {
_func = func;
addNode(func); // Function node.
add_node(func); // Function node.
BaseNode* prev = cursor(); // {CURSOR}.
addNode(func->exitNode()); // Function exit label.
addNode(func->endNode()); // Function end sentinel.
add_node(func->exit_node()); // Function exit label.
add_node(func->end_node()); // Function end sentinel.
_setCursor(prev);
set_cursor(prev);
return func;
}
Error BaseCompiler::endFunc() {
Error BaseCompiler::end_func() {
FuncNode* func = _func;
resetState();
reset_state();
if (ASMJIT_UNLIKELY(!func)) {
return reportError(DebugUtils::errored(kErrorInvalidState));
return report_error(make_error(Error::kInvalidState));
}
// Add the local constant pool at the end of the function (if it exists).
ConstPoolNode* localConstPool = _constPools[uint32_t(ConstPoolScope::kLocal)];
if (localConstPool) {
setCursor(func->endNode()->prev());
addNode(localConstPool);
_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
ConstPoolNode* local_const_pool = _const_pools[uint32_t(ConstPoolScope::kLocal)];
if (local_const_pool) {
set_cursor(func->end_node()->prev());
add_node(local_const_pool);
_const_pools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
}
// Mark as finished.
_func = nullptr;
SentinelNode* end = func->endNode();
setCursor(end);
SentinelNode* end = func->end_node();
set_cursor(end);
return kErrorOk;
return Error::kOk;
}
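// A hedged end-to-end sketch of the function management API above (`cc` is a
// concrete compiler, `signature` a FuncSignature; error checks elided):
//
//   FuncNode* func = cc.add_func(signature); // inserts FuncNode + exit label + end sentinel
//   // ... emit the function body; the cursor stays inside the function ...
//   cc.end_func();                           // flushes the local const pool, cursor -> end sentinel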
// BaseCompiler - Function Invocation
// ==================================
Error BaseCompiler::newInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
Error BaseCompiler::new_invoke_node(Out<InvokeNode*> out, InstId inst_id, const Operand_& o0, const FuncSignature& signature) {
InvokeNode* node = nullptr;
ASMJIT_PROPAGATE(_newNodeT<InvokeNode>(&node, instId, InstOptions::kNone));
ASMJIT_PROPAGATE(new_node_t<InvokeNode>(Out(node), inst_id, InstOptions::kNone));
node->setOpCount(1);
node->setOp(0, o0);
node->resetOpRange(1, node->opCapacity());
node->set_op_count(1);
node->set_op(0, o0);
node->reset_op_range(1, node->op_capacity());
Error err = node->detail().init(signature, environment());
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
// Skip the allocation if there are no arguments.
uint32_t argCount = signature.argCount();
if (argCount) {
node->_args = _codeZone.alloc<InvokeNode::OperandPack>(argCount * sizeof(InvokeNode::OperandPack));
uint32_t arg_count = signature.arg_count();
if (arg_count) {
node->_args = _builder_arena.alloc_oneshot<InvokeNode::OperandPack>(arg_count * sizeof(InvokeNode::OperandPack));
if (!node->_args) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
return report_error(make_error(Error::kOutOfMemory));
}
memset(node->_args, 0, argCount * sizeof(InvokeNode::OperandPack));
memset(node->_args, 0, arg_count * sizeof(InvokeNode::OperandPack));
}
*out = node;
return kErrorOk;
out = node;
return Error::kOk;
}
Error BaseCompiler::addInvokeNode(InvokeNode** out, InstId instId, const Operand_& o0, const FuncSignature& signature) {
State state = _grabState();
Error BaseCompiler::add_invoke_node(Out<InvokeNode*> out, InstId inst_id, const Operand_& o0, const FuncSignature& signature) {
State state = _grab_state();
ASMJIT_PROPAGATE(newInvokeNode(out, instId, o0, signature));
ASMJIT_ASSUME(*out != nullptr);
ASMJIT_PROPAGATE(new_invoke_node(out, inst_id, o0, signature));
Builder_assign_inst_state(this, *out, state);
BaseBuilder_assignInstState(this, *out, state);
addNode(*out);
return kErrorOk;
add_node(*out);
return Error::kOk;
}
// BaseCompiler - Virtual Registers
// ================================
Error BaseCompiler::newVirtReg(VirtReg** out, TypeId typeId, OperandSignature signature, const char* name) {
*out = nullptr;
uint32_t index = _vRegArray.size();
Error BaseCompiler::new_virt_reg(Out<VirtReg*> out, TypeId type_id, OperandSignature signature, const char* name) {
out = nullptr;
size_t index = _virt_regs.size();
if (ASMJIT_UNLIKELY(index >= uint32_t(Operand::kVirtIdCount))) {
return reportError(DebugUtils::errored(kErrorTooManyVirtRegs));
if (ASMJIT_UNLIKELY(index >= size_t(Operand::kVirtIdCount))) {
return report_error(make_error(Error::kTooManyVirtRegs));
}
if (ASMJIT_UNLIKELY(_vRegArray.willGrow(&_allocator) != kErrorOk)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
if (ASMJIT_UNLIKELY(_virt_regs.reserve_additional(_builder_arena) != Error::kOk)) {
return report_error(make_error(Error::kOutOfMemory));
}
void* vRegPtr = _codeZone.alloc(Zone::alignedSizeOf<VirtReg>());
if (ASMJIT_UNLIKELY(!vRegPtr)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
void* virt_reg_ptr = _builder_arena.alloc_oneshot(Arena::aligned_size_of<VirtReg>());
if (ASMJIT_UNLIKELY(!virt_reg_ptr)) {
return report_error(make_error(Error::kOutOfMemory));
}
uint32_t size = TypeUtils::sizeOf(typeId);
uint32_t alignment = Support::min<uint32_t>(size, 64);
VirtReg* vReg = new(Support::PlacementNew{vRegPtr}) VirtReg(signature, Operand::indexToVirtId(index), size, alignment, typeId);
uint32_t size = TypeUtils::size_of(type_id);
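  // Computes floor(log2(alignment)) where alignment = min(size, 64); the `| 1u`
  // below guards Support::clz() against a zero input when `size` is 0.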
uint32_t alignment_log2 = 31 - Support::clz(Support::min<uint32_t>(size, 64) | 1u);
VirtRegFlags flags = VirtReg::_flags_from_alignment_log2(alignment_log2);
VirtReg* virt_reg = new(Support::PlacementNew{virt_reg_ptr}) VirtReg(signature.reg_type(), flags, Operand::virt_index_to_virt_id(uint32_t(index)), size, type_id);
#ifndef ASMJIT_NO_LOGGING
if (name && name[0] != '\0') {
vReg->_name.setData(&_codeZone, name, SIZE_MAX);
virt_reg->_name.set_data(_builder_arena, name, SIZE_MAX);
}
#else
DebugUtils::unused(name);
Support::maybe_unused(name);
#endif
_vRegArray.appendUnsafe(vReg);
*out = vReg;
_virt_regs.append_unchecked(virt_reg);
out = virt_reg;
return kErrorOk;
return Error::kOk;
}
Error BaseCompiler::_newReg(Reg* out, TypeId typeId, const char* name) {
OperandSignature regSignature;
Error BaseCompiler::_new_reg(Out<Reg> out, TypeId type_id, const char* name) {
OperandSignature reg_signature;
out->reset();
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
Error err = ArchUtils::type_id_to_reg_signature(arch(), type_id, Out(type_id), Out(reg_signature));
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
ASMJIT_ASSUME(vReg != nullptr);
VirtReg* virt_reg;
ASMJIT_PROPAGATE(new_virt_reg(Out(virt_reg), type_id, reg_signature, name));
ASMJIT_ASSUME(virt_reg != nullptr);
out->_initReg(regSignature, vReg->id());
return kErrorOk;
out->_init_reg(reg_signature, virt_reg->id());
return Error::kOk;
}
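// A hedged usage sketch (the TypeId appears elsewhere in this file; the other
// names are placeholders):
//
//   Reg tmp;
//   cc._new_reg(Out(tmp), TypeId::kInt32, "tmp"); // 32-bit integer virtual register
//   VirtReg* vr = cc.virt_reg_by_reg(tmp);        // back-reference to its VirtReg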
Error BaseCompiler::_newRegFmt(Reg* out, TypeId typeId, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
va_end(ap);
return _newReg(out, typeId, sb.data());
}
Error BaseCompiler::_newReg(Reg* out, const Reg& ref, const char* name) {
Error BaseCompiler::_new_reg(Out<Reg> out, const Reg& ref, const char* name) {
out->reset();
OperandSignature regSignature;
TypeId typeId;
OperandSignature reg_signature;
TypeId type_id;
if (isVirtRegValid(ref)) {
VirtReg* vRef = virtRegByReg(ref);
typeId = vRef->typeId();
if (is_virt_reg_valid(ref)) {
VirtReg* v_ref = virt_reg_by_reg(ref);
type_id = v_ref->type_id();
// NOTE: It's possible to cast one register type to another if it's the same register group. However, VirtReg
// always contains the TypeId that was used to create the register. This means that in some cases we may end
// up having different size of `ref` and `vRef`. In such case we adjust the TypeId to match the `ref` register
  // up with `ref` and `v_ref` having different sizes. In such a case we adjust the TypeId to match the `ref` register
// type instead of the original register type, which should be the expected behavior.
uint32_t typeSize = TypeUtils::sizeOf(typeId);
uint32_t refSize = ref.size();
uint32_t type_size = TypeUtils::size_of(type_id);
uint32_t ref_size = ref.size();
if (typeSize != refSize) {
if (TypeUtils::isInt(typeId)) {
// GP register - change TypeId to match `ref`, but keep sign of `vRef`.
switch (refSize) {
case 1: typeId = TypeId(uint32_t(TypeId::kInt8 ) | (uint32_t(typeId) & 1)); break;
case 2: typeId = TypeId(uint32_t(TypeId::kInt16) | (uint32_t(typeId) & 1)); break;
case 4: typeId = TypeId(uint32_t(TypeId::kInt32) | (uint32_t(typeId) & 1)); break;
case 8: typeId = TypeId(uint32_t(TypeId::kInt64) | (uint32_t(typeId) & 1)); break;
default: typeId = TypeId::kVoid; break;
if (type_size != ref_size) {
if (TypeUtils::is_int(type_id)) {
// GP register - change TypeId to match `ref`, but keep sign of `v_ref`.
switch (ref_size) {
case 1: type_id = TypeId(uint32_t(TypeId::kInt8 ) | (uint32_t(type_id) & 1)); break;
case 2: type_id = TypeId(uint32_t(TypeId::kInt16) | (uint32_t(type_id) & 1)); break;
case 4: type_id = TypeId(uint32_t(TypeId::kInt32) | (uint32_t(type_id) & 1)); break;
case 8: type_id = TypeId(uint32_t(TypeId::kInt64) | (uint32_t(type_id) & 1)); break;
default: type_id = TypeId::kVoid; break;
}
}
else if (TypeUtils::isMmx(typeId)) {
else if (TypeUtils::is_mmx(type_id)) {
// MMX register - always use 64-bit.
typeId = TypeId::kMmx64;
type_id = TypeId::kMmx64;
}
else if (TypeUtils::isMask(typeId)) {
else if (TypeUtils::is_mask(type_id)) {
// Mask register - change TypeId to match `ref` size.
switch (refSize) {
case 1: typeId = TypeId::kMask8; break;
case 2: typeId = TypeId::kMask16; break;
case 4: typeId = TypeId::kMask32; break;
case 8: typeId = TypeId::kMask64; break;
default: typeId = TypeId::kVoid; break;
switch (ref_size) {
case 1: type_id = TypeId::kMask8; break;
case 2: type_id = TypeId::kMask16; break;
case 4: type_id = TypeId::kMask32; break;
case 8: type_id = TypeId::kMask64; break;
default: type_id = TypeId::kVoid; break;
}
}
else {
// Vector register - change TypeId to match `ref` size, keep vector metadata.
TypeId scalarTypeId = TypeUtils::scalarOf(typeId);
switch (refSize) {
case 16: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec128Start); break;
case 32: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec256Start); break;
case 64: typeId = TypeUtils::scalarToVector(scalarTypeId, TypeId::_kVec512Start); break;
default: typeId = TypeId::kVoid; break;
TypeId scalar_type_id = TypeUtils::scalar_of(type_id);
switch (ref_size) {
case 16: type_id = TypeUtils::scalar_to_vector(scalar_type_id, TypeId::_kVec128Start); break;
case 32: type_id = TypeUtils::scalar_to_vector(scalar_type_id, TypeId::_kVec256Start); break;
case 64: type_id = TypeUtils::scalar_to_vector(scalar_type_id, TypeId::_kVec512Start); break;
default: type_id = TypeId::kVoid; break;
}
}
if (typeId == TypeId::kVoid) {
return reportError(DebugUtils::errored(kErrorInvalidState));
if (type_id == TypeId::kVoid) {
return report_error(make_error(Error::kInvalidState));
}
}
}
else {
typeId = RegUtils::typeIdOf(ref.regType());
type_id = RegUtils::type_id_of(ref.reg_type());
}
Error err = ArchUtils::typeIdToRegSignature(arch(), typeId, &typeId, &regSignature);
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
Error err = ArchUtils::type_id_to_reg_signature(arch(), type_id, Out(type_id), Out(reg_signature));
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, typeId, regSignature, name));
ASMJIT_ASSUME(vReg != nullptr);
VirtReg* virt_reg;
ASMJIT_PROPAGATE(new_virt_reg(Out(virt_reg), type_id, reg_signature, name));
ASMJIT_ASSUME(virt_reg != nullptr);
out->_initReg(regSignature, vReg->id());
return kErrorOk;
out->_init_reg(reg_signature, virt_reg->id());
return Error::kOk;
}
Error BaseCompiler::_newRegFmt(Reg* out, const Reg& ref, const char* fmt, ...) {
Error BaseCompiler::_new_reg_fmt(Out<Reg> out, TypeId type_id, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.appendVFormat(fmt, ap);
sb.append_vformat(fmt, ap);
va_end(ap);
return _newReg(out, ref, sb.data());
return _new_reg(out, type_id, sb.data());
}
Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, const char* name) {
Error BaseCompiler::_new_reg_fmt(Out<Reg> out, const Reg& ref, const char* fmt, ...) {
va_list ap;
StringTmp<256> sb;
va_start(ap, fmt);
sb.append_vformat(fmt, ap);
va_end(ap);
return _new_reg(out, ref, sb.data());
}
Error BaseCompiler::_new_stack(Out<BaseMem> out, uint32_t size, uint32_t alignment, const char* name) {
out->reset();
if (ASMJIT_UNLIKELY(Support::bool_or(size == 0, !Support::isZeroOrPowerOf2(alignment)))) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (ASMJIT_UNLIKELY(Support::bool_or(size == 0, !Support::is_zero_or_power_of_2(alignment)))) {
return report_error(make_error(Error::kInvalidArgument));
}
if (alignment == 0u) {
@@ -391,86 +388,83 @@ Error BaseCompiler::_newStack(BaseMem* out, uint32_t size, uint32_t alignment, c
alignment = 64u;
}
VirtReg* vReg;
ASMJIT_PROPAGATE(newVirtReg(&vReg, TypeId::kVoid, OperandSignature{0}, name));
ASMJIT_ASSUME(vReg != nullptr);
VirtReg* virt_reg;
ASMJIT_PROPAGATE(new_virt_reg(Out(virt_reg), TypeId::kVoid, OperandSignature{0}, name));
ASMJIT_ASSUME(virt_reg != nullptr);
vReg->_virtSize = size;
vReg->_isStack = true;
vReg->_alignment = uint8_t(alignment);
virt_reg->_virt_size = size;
virt_reg->_reg_flags |= VirtRegFlags::kIsStackArea | VirtReg::_flags_from_alignment_log2(Support::ctz(alignment));
// Set the memory operand to GPD/GPQ and its id to VirtReg.
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(_gpSignature.regType()) |
OperandSignature::fromBits(OperandSignature::kMemRegHomeFlag),
vReg->id(), 0, 0);
return kErrorOk;
out = BaseMem(OperandSignature::from_op_type(OperandType::kMem) |
OperandSignature::from_mem_base_type(_gp_signature.reg_type()) |
OperandSignature::from_bits(OperandSignature::kMemRegHomeFlag),
virt_reg->id(), 0, 0);
return Error::kOk;
}
Error BaseCompiler::setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment) {
if (!isVirtIdValid(virtId)) {
return DebugUtils::errored(kErrorInvalidVirtId);
Error BaseCompiler::set_stack_size(uint32_t virt_id, uint32_t new_size, uint32_t new_alignment) {
if (!is_virt_id_valid(virt_id)) {
return make_error(Error::kInvalidVirtId);
}
if (!Support::isZeroOrPowerOf2(newAlignment)) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (!Support::is_zero_or_power_of_2(new_alignment)) {
return report_error(make_error(Error::kInvalidArgument));
}
if (newAlignment > 64u) {
newAlignment = 64u;
VirtReg* virt_reg = virt_reg_by_id(virt_id);
if (new_size) {
virt_reg->_virt_size = new_size;
}
VirtReg* vReg = virtRegById(virtId);
if (newSize) {
vReg->_virtSize = newSize;
}
if (newAlignment) {
vReg->_alignment = uint8_t(newAlignment);
if (new_alignment) {
uint32_t alignment_log2 = Support::ctz(Support::min<uint32_t>(new_alignment, 64u));
virt_reg->_reg_flags = (virt_reg->_reg_flags & ~VirtRegFlags::kAlignmentLog2Mask) | VirtReg::_flags_from_alignment_log2(alignment_log2);
}
  // This is required if the RAPass is already running. There is a chance that a stack-slot has already been
  // allocated, and in that case it has to be updated as well, otherwise we would allocate the wrong amount of memory.
RAWorkReg* workReg = vReg->_workReg;
if (workReg && workReg->_stackSlot) {
workReg->_stackSlot->_size = vReg->_virtSize;
workReg->_stackSlot->_alignment = vReg->_alignment;
RAWorkReg* work_reg = virt_reg->_work_reg;
if (work_reg && work_reg->_stack_slot) {
work_reg->_stack_slot->_size = virt_reg->virt_size();
work_reg->_stack_slot->_alignment = uint8_t(virt_reg->alignment());
}
return kErrorOk;
return Error::kOk;
}
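// A hedged usage sketch tying _new_stack() and set_stack_size() together
// (`cc` is a concrete compiler; names are placeholders):
//
//   BaseMem slot;
//   cc._new_stack(Out(slot), 32, 16, "tmp"); // 32 bytes, 16-byte aligned home slot
//   cc.set_stack_size(slot, 64);             // grow it later; alignment stays (0 = keep)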
Error BaseCompiler::_newConst(BaseMem* out, ConstPoolScope scope, const void* data, size_t size) {
Error BaseCompiler::_new_const(Out<BaseMem> out, ConstPoolScope scope, const void* data, size_t size) {
out->reset();
if (uint32_t(scope) > 1) {
return reportError(DebugUtils::errored(kErrorInvalidArgument));
if (scope > ConstPoolScope::kMaxValue) {
return report_error(make_error(Error::kInvalidArgument));
}
if (!_constPools[uint32_t(scope)]) {
ASMJIT_PROPAGATE(newConstPoolNode(&_constPools[uint32_t(scope)]));
if (!_const_pools[uint32_t(scope)]) {
ASMJIT_PROPAGATE(new_const_pool_node(Out(_const_pools[uint32_t(scope)])));
}
ConstPoolNode* pool = _constPools[uint32_t(scope)];
ConstPoolNode* pool = _const_pools[uint32_t(scope)];
size_t off;
Error err = pool->add(data, size, off);
Error err = pool->add(data, size, Out(off));
if (ASMJIT_UNLIKELY(err)) {
return reportError(err);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
return report_error(err);
}
*out = BaseMem(OperandSignature::fromOpType(OperandType::kMem) |
OperandSignature::fromMemBaseType(RegType::kLabelTag) |
OperandSignature::fromSize(uint32_t(size)),
pool->labelId(), 0, int32_t(off));
return kErrorOk;
out = BaseMem(OperandSignature::from_op_type(OperandType::kMem) |
OperandSignature::from_mem_base_type(RegType::kLabelTag) |
OperandSignature::from_size(uint32_t(size)),
pool->label_id(), 0, int32_t(off));
return Error::kOk;
}
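// A hedged usage sketch of _new_const() (a double constant in the local pool):
//
//   double k = 3.141592653589793;
//   BaseMem c;
//   cc._new_const(Out(c), ConstPoolScope::kLocal, &k, sizeof(k));
//   // `c` is now a label-relative memory operand addressing the pooled constant.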
void BaseCompiler::rename(const Reg& reg, const char* fmt, ...) {
if (!reg.isVirtReg()) return;
if (!reg.is_virt_reg()) return;
VirtReg* vReg = virtRegById(reg.id());
if (!vReg) {
VirtReg* virt_reg = virt_reg_by_id(reg.id());
if (!virt_reg) {
return;
}
@@ -482,58 +476,58 @@ void BaseCompiler::rename(const Reg& reg, const char* fmt, ...) {
vsnprintf(buf, ASMJIT_ARRAY_SIZE(buf), fmt, ap);
va_end(ap);
vReg->_name.setData(&_codeZone, buf, SIZE_MAX);
virt_reg->_name.set_data(_builder_arena, buf, SIZE_MAX);
}
}
// BaseCompiler - Jump Annotations
// ===============================
Error BaseCompiler::newJumpNode(JumpNode** out, InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation) {
JumpNode* node = _codeZone.alloc<JumpNode>();
Error BaseCompiler::new_jump_node(Out<JumpNode*> out, InstId inst_id, InstOptions inst_options, const Operand_& o0, JumpAnnotation* annotation) {
JumpNode* node = _builder_arena.alloc_oneshot<JumpNode>();
*out = node;
if (ASMJIT_UNLIKELY(!node)) {
return reportError(DebugUtils::errored(kErrorOutOfMemory));
return report_error(make_error(Error::kOutOfMemory));
}
uint32_t opCount = 1;
node = new(Support::PlacementNew{node}) JumpNode(instId, instOptions, opCount, annotation);
node->setOp(0, o0);
node->resetOpRange(opCount, JumpNode::kBaseOpCapacity);
uint32_t op_count = 1;
node = new(Support::PlacementNew{node}) JumpNode(inst_id, inst_options, op_count, annotation);
node->set_op(0, o0);
node->reset_op_range(op_count, JumpNode::kBaseOpCapacity);
return kErrorOk;
return Error::kOk;
}
Error BaseCompiler::emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation) {
State state = _grabState();
Error BaseCompiler::emit_annotated_jump(InstId inst_id, const Operand_& o0, JumpAnnotation* annotation) {
State state = _grab_state();
JumpNode* node;
ASMJIT_PROPAGATE(newJumpNode(&node, instId, state.options, o0, annotation));
ASMJIT_PROPAGATE(new_jump_node(Out(node), inst_id, state.options, o0, annotation));
node->setExtraReg(state.extraReg);
BaseBuilder_assignInlineComment(this, node, state.comment);
node->set_extra_reg(state.extra_reg);
Builder_assign_inline_comment(this, node, state.comment);
addNode(node);
return kErrorOk;
add_node(node);
return Error::kOk;
}
JumpAnnotation* BaseCompiler::newJumpAnnotation() {
if (_jumpAnnotations.grow(&_allocator, 1) != kErrorOk) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
JumpAnnotation* BaseCompiler::new_jump_annotation() {
if (_jump_annotations.reserve_additional(_builder_arena, 1) != Error::kOk) {
report_error(make_error(Error::kOutOfMemory));
return nullptr;
}
uint32_t id = _jumpAnnotations.size();
JumpAnnotation* jumpAnnotation = _codeZone.newT<JumpAnnotation>(this, id);
uint32_t id = uint32_t(_jump_annotations.size());
JumpAnnotation* jump_annotation = _builder_arena.new_oneshot<JumpAnnotation>(this, id);
if (!jumpAnnotation) {
reportError(DebugUtils::errored(kErrorOutOfMemory));
if (!jump_annotation) {
report_error(make_error(Error::kOutOfMemory));
return nullptr;
}
_jumpAnnotations.appendUnsafe(jumpAnnotation);
return jumpAnnotation;
_jump_annotations.append_unchecked(jump_annotation);
return jump_annotation;
}
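// A hedged jump-table sketch built on the annotation API above (`target` is a
// placeholder register operand and `jump_inst_id` a placeholder instruction id):
//
//   JumpAnnotation* annotation = cc.new_jump_annotation();
//   annotation->add_label(case_a);            // register every possible target
//   annotation->add_label(case_b);
//   cc.emit_annotated_jump(jump_inst_id, target, annotation);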
// BaseCompiler - Events
@@ -541,40 +535,40 @@ JumpAnnotation* BaseCompiler::newJumpAnnotation() {
static ASMJIT_INLINE void BaseCompiler_clear(BaseCompiler* self) noexcept {
self->_func = nullptr;
self->_constPools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
self->_constPools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
self->_vRegArray.reset();
self->_const_pools[uint32_t(ConstPoolScope::kLocal)] = nullptr;
self->_const_pools[uint32_t(ConstPoolScope::kGlobal)] = nullptr;
self->_virt_regs.reset();
}
static ASMJIT_INLINE Error BaseCompiler_initDefaultPasses(BaseCompiler* self) noexcept {
return self->addPassT<GlobalConstPoolPass>();
return self->add_pass<GlobalConstPoolPass>();
}
Error BaseCompiler::onAttach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::onAttach(code));
Error BaseCompiler::on_attach(CodeHolder& code) noexcept {
ASMJIT_PROPAGATE(Base::on_attach(code));
Error err = BaseCompiler_initDefaultPasses(this);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
on_detach(code);
return err;
}
return kErrorOk;
return Error::kOk;
}
Error BaseCompiler::onDetach(CodeHolder& code) noexcept {
Error BaseCompiler::on_detach(CodeHolder& code) noexcept {
BaseCompiler_clear(this);
return Base::onDetach(code);
return Base::on_detach(code);
}
Error BaseCompiler::onReinit(CodeHolder& code) noexcept {
Error BaseCompiler::on_reinit(CodeHolder& code) noexcept {
BaseCompiler_clear(this);
Error err = Base::onReinit(code);
Error err = Base::on_reinit(code);
if (ASMJIT_LIKELY(err == kErrorOk)) {
if (ASMJIT_LIKELY(err == Error::kOk)) {
err = BaseCompiler_initDefaultPasses(this);
if (ASMJIT_UNLIKELY(err)) {
onDetach(code);
if (ASMJIT_UNLIKELY(err != Error::kOk)) {
on_detach(code);
return err;
}
}
@@ -585,35 +579,35 @@ Error BaseCompiler::onReinit(CodeHolder& code) noexcept {
// FuncPass - Construction & Destruction
// =====================================
FuncPass::FuncPass(const char* name) noexcept
: Pass(name) {}
FuncPass::FuncPass(BaseCompiler& cc, const char* name) noexcept
: Pass(cc, name) {}
// FuncPass - Run
// ==============
Error FuncPass::run(Zone* zone, Logger* logger) {
BaseNode* node = cb()->firstNode();
Error FuncPass::run(Arena& arena, Logger* logger) {
BaseNode* node = cc().first_node();
while (node) {
if (node->type() == NodeType::kFunc) {
FuncNode* func = node->as<FuncNode>();
node = func->endNode();
ASMJIT_PROPAGATE(runOnFunction(zone, logger, func));
}
// Find a function by skipping all nodes that are not `NodeType::kFunc`.
do {
if (node->type() != NodeType::kFunc) {
node = node->next();
} while (node && node->type() != NodeType::kFunc);
continue;
}
else {
FuncNode* func = node->as<FuncNode>();
node = func->end_node();
ASMJIT_PROPAGATE(run_on_function(arena, logger, func));
}
}
return kErrorOk;
return Error::kOk;
}
// [[pure virtual]]
Error FuncPass::runOnFunction(Zone* zone, Logger* logger, FuncNode* func) {
DebugUtils::unused(zone, logger, func);
return DebugUtils::errored(kErrorInvalidState);
Error FuncPass::run_on_function(Arena& arena, Logger* logger, FuncNode* func) {
Support::maybe_unused(arena, logger, func);
return make_error(Error::kInvalidState);
}
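// A minimal sketch of a concrete function pass (assumed subclassing pattern,
// matching the FuncPass constructor and run_on_function() signature above):
//
//   class MyPass : public FuncPass {
//   public:
//     MyPass(BaseCompiler& cc) noexcept : FuncPass(cc, "MyPass") {}
//
//     Error run_on_function(Arena& arena, Logger* logger, FuncNode* func) override {
//       Support::maybe_unused(arena, logger);
//       // Visit nodes from `func` to `func->end_node()` here.
//       return Error::kOk;
//     }
//   };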
ASMJIT_END_NAMESPACE


@@ -9,6 +9,8 @@
#include "../core/api-config.h"
#ifndef ASMJIT_NO_COMPILER
#include "../core/arena.h"
#include "../core/arenavector.h"
#include "../core/assembler.h"
#include "../core/builder.h"
#include "../core/constpool.h"
@@ -17,8 +19,6 @@
#include "../core/inst.h"
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonevector.h"
ASMJIT_BEGIN_NAMESPACE
@@ -59,14 +59,14 @@ public:
//! Current function.
FuncNode* _func;
//! Stores array of `VirtReg` pointers.
ZoneVector<VirtReg*> _vRegArray;
ArenaVector<VirtReg*> _virt_regs;
//! Stores jump annotations.
ZoneVector<JumpAnnotation*> _jumpAnnotations;
ArenaVector<JumpAnnotation*> _jump_annotations;
//! Local and global constant pools.
//!
//! Local constant pool is flushed with each function, global constant pool is flushed only by \ref finalize().
ConstPoolNode* _constPools[2];
ConstPoolNode* _const_pools[2];
//! \}
@@ -80,46 +80,59 @@ public:
//! \}
//! \name Passes
//! \{
//! \overload
template<typename PassT, typename... Args>
[[nodiscard]]
ASMJIT_INLINE PassT* new_pass(Args&&... args) noexcept { return _builder_arena.new_oneshot<PassT>(*this, std::forward<Args>(args)...); }
template<typename T, typename... Args>
ASMJIT_INLINE Error add_pass(Args&&... args) { return _add_pass(new_pass<T, Args...>(std::forward<Args>(args)...)); }
//! \}
//! \name Function Management
//! \{
//! Creates a new \ref FuncNode.
ASMJIT_API Error newFuncNode(FuncNode** ASMJIT_NONNULL(out), const FuncSignature& signature);
ASMJIT_API Error new_func_node(Out<FuncNode*> out, const FuncSignature& signature);
//! Creates a new \ref FuncNode and adds it to the instruction stream.
ASMJIT_API Error addFuncNode(FuncNode** ASMJIT_NONNULL(out), const FuncSignature& signature);
ASMJIT_API Error add_func_node(Out<FuncNode*> out, const FuncSignature& signature);
//! Creates a new \ref FuncRetNode.
ASMJIT_API Error newFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
ASMJIT_API Error new_func_ret_node(Out<FuncRetNode*> out, const Operand_& o0, const Operand_& o1);
//! Creates a new \ref FuncRetNode and adds it to the instruction stream.
ASMJIT_API Error addFuncRetNode(FuncRetNode** ASMJIT_NONNULL(out), const Operand_& o0, const Operand_& o1);
ASMJIT_API Error add_func_ret_node(Out<FuncRetNode*> out, const Operand_& o0, const Operand_& o1);
//! Returns the current function.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncNode* func() const noexcept { return _func; }
//! Creates a new \ref FuncNode with the given `signature` and returns it.
inline FuncNode* newFunc(const FuncSignature& signature) {
ASMJIT_INLINE FuncNode* new_func(const FuncSignature& signature) {
FuncNode* node;
newFuncNode(&node, signature);
new_func_node(Out(node), signature);
return node;
}
//! Creates a new \ref FuncNode with the given `signature`, adds it to the instruction stream by using
//! the \ref addFunc(FuncNode*) overload, and returns it.
inline FuncNode* addFunc(const FuncSignature& signature) {
//! `add_func(FuncNode* func)` overload, and returns the node.
ASMJIT_INLINE FuncNode* add_func(const FuncSignature& signature) {
FuncNode* node;
addFuncNode(&node, signature);
add_func_node(Out(node), signature);
return node;
}
//! Adds a function `node` to the instruction stream.
ASMJIT_API FuncNode* addFunc(FuncNode* ASMJIT_NONNULL(func));
ASMJIT_API FuncNode* add_func(FuncNode* ASMJIT_NONNULL(func));
//! Emits a sentinel that marks the end of the current function.
ASMJIT_API Error endFunc();
ASMJIT_API Error end_func();
inline Error addRet(const Operand_& o0, const Operand_& o1) {
ASMJIT_INLINE Error add_ret(const Operand_& o0, const Operand_& o1) {
FuncRetNode* node;
return addFuncRetNode(&node, o0, o1);
return add_func_ret_node(Out(node), o0, o1);
}
//! \}
@@ -128,73 +141,72 @@ public:
//! \{
//! Creates a new \ref InvokeNode.
ASMJIT_API Error newInvokeNode(InvokeNode** ASMJIT_NONNULL(out), InstId instId, const Operand_& o0, const FuncSignature& signature);
ASMJIT_API Error new_invoke_node(Out<InvokeNode*> out, InstId inst_id, const Operand_& o0, const FuncSignature& signature);
//! Creates a new \ref InvokeNode and adds it to the instruction stream.
ASMJIT_API Error addInvokeNode(InvokeNode** ASMJIT_NONNULL(out), InstId instId, const Operand_& o0, const FuncSignature& signature);
ASMJIT_API Error add_invoke_node(Out<InvokeNode*> out, InstId inst_id, const Operand_& o0, const FuncSignature& signature);
//! \}
//! \name Virtual Registers
//! \{
//! Creates a new virtual register representing the given `typeId` and `signature`.
//! Creates a new virtual register representing the given `type_id` and `signature`.
//!
//! \note This function is public, but it's not generally recommended to be used by AsmJit users, use architecture
//! specific `newReg()` functionality instead or functions like \ref _newReg() and \ref _newRegFmt().
ASMJIT_API Error newVirtReg(VirtReg** ASMJIT_NONNULL(out), TypeId typeId, OperandSignature signature, const char* name);
//! specific `new_reg()` functionality or functions like \ref _new_reg() and \ref _new_reg_fmt() instead.
ASMJIT_API Error new_virt_reg(Out<VirtReg*> out, TypeId type_id, OperandSignature signature, const char* name);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
ASMJIT_API Error _newReg(Reg* ASMJIT_NONNULL(out), TypeId typeId, const char* name = nullptr);
//! Creates a new virtual register of the given `type_id` and stores it to `out` operand.
ASMJIT_API Error _new_reg(Out<Reg> out, TypeId type_id, const char* name = nullptr);
//! Creates a new virtual register compatible with the provided reference register `ref`.
ASMJIT_API Error _new_reg(Out<Reg> out, const Reg& ref, const char* name = nullptr);
//! Creates a new virtual register of the given `typeId` and stores it to `out` operand.
//! Creates a new virtual register of the given `type_id` and stores it to `out` operand.
//!
//! \note This version accepts a snprintf() format `fmt` followed by a variadic arguments.
ASMJIT_API Error _newRegFmt(Reg* ASMJIT_NONNULL(out), TypeId typeId, const char* fmt, ...);
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _new_reg_fmt(Out<Reg> out, TypeId type_id, const char* fmt, ...);
//! \overload
inline Error _newRegFmt(Reg* ASMJIT_NONNULL(out), TypeId typeId) { return _newRegFmt(out, typeId, nullptr); }
//! Creates a new virtual register compatible with the provided reference register `ref`.
ASMJIT_API Error _newReg(Reg* ASMJIT_NONNULL(out), const Reg& ref, const char* name = nullptr);
ASMJIT_INLINE Error _new_reg_fmt(Out<Reg> out, TypeId type_id) { return _new_reg(out, type_id); }
//! Creates a new virtual register compatible with the provided reference register `ref`.
//!
//! \note This version accepts a snprintf() format `fmt` followed by a variadic arguments.
ASMJIT_API Error _newRegFmt(Reg* ASMJIT_NONNULL(out), const Reg& ref, const char* fmt, ...);
//! \note This version accepts a snprintf() format `fmt` followed by variadic arguments.
ASMJIT_API Error _new_reg_fmt(Out<Reg> out, const Reg& ref, const char* fmt, ...);
//! Tests whether the given `id` is a valid virtual register id.
//! Tests whether the given `virt_id` is a valid virtual register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtIdValid(uint32_t id) const noexcept {
uint32_t index = Operand::virtIdToIndex(id);
return index < _vRegArray.size();
ASMJIT_INLINE_NODEBUG bool is_virt_id_valid(uint32_t virt_id) const noexcept {
uint32_t index = Operand::virt_id_to_index(virt_id);
return index < _virt_regs.size();
}
//! Tests whether the given `reg` is a virtual register having a valid id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isVirtRegValid(const Reg& reg) const noexcept {
return isVirtIdValid(reg.id());
ASMJIT_INLINE_NODEBUG bool is_virt_reg_valid(const Reg& reg) const noexcept {
return is_virt_id_valid(reg.id());
}
//! Returns \ref VirtReg associated with the given `id`.
//! Returns \ref VirtReg associated with the given `virt_id`.
[[nodiscard]]
inline VirtReg* virtRegById(uint32_t id) const noexcept {
ASMJIT_ASSERT(isVirtIdValid(id));
return _vRegArray[Operand::virtIdToIndex(id)];
ASMJIT_INLINE VirtReg* virt_reg_by_id(uint32_t virt_id) const noexcept {
ASMJIT_ASSERT(is_virt_id_valid(virt_id));
return _virt_regs[Operand::virt_id_to_index(virt_id)];
}
//! Returns \ref VirtReg associated with the given `reg`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByReg(const Reg& reg) const noexcept { return virtRegById(reg.id()); }
ASMJIT_INLINE_NODEBUG VirtReg* virt_reg_by_reg(const Reg& reg) const noexcept { return virt_reg_by_id(reg.id()); }
//! Returns \ref VirtReg associated with the given virtual register `index`.
//!
//! \note This is not the same as virtual register id. The conversion between id and its index is implemented
//! by \ref Operand_::virtIdToIndex() and \ref Operand_::indexToVirtId() functions.
//! by \ref Operand_::virt_id_to_index() and \ref Operand_::virt_index_to_virt_id() functions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG VirtReg* virtRegByIndex(uint32_t index) const noexcept { return _vRegArray[index]; }
ASMJIT_INLINE_NODEBUG VirtReg* virt_reg_by_index(uint32_t index) const noexcept { return _virt_regs[index]; }
//! Returns an array of all virtual registers managed by the Compiler.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<VirtReg*>& virtRegs() const noexcept { return _vRegArray; }
ASMJIT_INLINE_NODEBUG Span<VirtReg*> virt_regs() const noexcept { return _virt_regs.as_span(); }
//! \name Stack
//! \{
@@ -202,14 +214,14 @@ public:
//! Creates a new stack of the given `size` and `alignment` and stores it to `out`.
//!
//! \note `name` can be used to give the stack a name, for debugging purposes.
ASMJIT_API Error _newStack(BaseMem* ASMJIT_NONNULL(out), uint32_t size, uint32_t alignment, const char* name = nullptr);
ASMJIT_API Error _new_stack(Out<BaseMem> out, uint32_t size, uint32_t alignment, const char* name = nullptr);
//! Updates the stack size of a stack created by `_newStack()` by its `virtId`.
ASMJIT_API Error setStackSize(uint32_t virtId, uint32_t newSize, uint32_t newAlignment = 0);
//! Updates the stack size of a stack created by `_new_stack()` by its `virt_id`.
ASMJIT_API Error set_stack_size(uint32_t virt_id, uint32_t new_size, uint32_t new_alignment = 0);
//! Updates the stack size of a stack created by `_newStack()`.
ASMJIT_INLINE_NODEBUG Error setStackSize(const BaseMem& mem, uint32_t newSize, uint32_t newAlignment = 0) {
return setStackSize(mem.id(), newSize, newAlignment);
//! Updates the stack size of a stack created by `_new_stack()`.
ASMJIT_INLINE_NODEBUG Error set_stack_size(const BaseMem& mem, uint32_t new_size, uint32_t new_alignment = 0) {
return set_stack_size(mem.id(), new_size, new_alignment);
}
//! \}
@@ -221,7 +233,7 @@ public:
//!
//! This function adds a constant of the given `size` to the built-in \ref ConstPool and stores a reference to that
//! constant in the `out` operand.
ASMJIT_API Error _newConst(BaseMem* ASMJIT_NONNULL(out), ConstPoolScope scope, const void* data, size_t size);
ASMJIT_API Error _new_const(Out<BaseMem> out, ConstPoolScope scope, const void* data, size_t size);
//! \}
@@ -237,26 +249,24 @@ public:
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<JumpAnnotation*>& jumpAnnotations() const noexcept {
return _jumpAnnotations;
}
ASMJIT_INLINE_NODEBUG Span<JumpAnnotation*> jump_annotations() const noexcept { return _jump_annotations.as_span(); }
ASMJIT_API Error newJumpNode(JumpNode** ASMJIT_NONNULL(out), InstId instId, InstOptions instOptions, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emitAnnotatedJump(InstId instId, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error new_jump_node(Out<JumpNode*> out, InstId inst_id, InstOptions inst_options, const Operand_& o0, JumpAnnotation* annotation);
ASMJIT_API Error emit_annotated_jump(InstId inst_id, const Operand_& o0, JumpAnnotation* annotation);
//! Returns a new `JumpAnnotation` instance, which can be used to aggregate possible targets of a jump where the
//! target is not a label, for example to implement jump tables.
[[nodiscard]]
ASMJIT_API JumpAnnotation* newJumpAnnotation();
ASMJIT_API JumpAnnotation* new_jump_annotation();
//! \}
//! \name Events
//! \{
ASMJIT_API Error onAttach(CodeHolder& code) noexcept override;
ASMJIT_API Error onDetach(CodeHolder& code) noexcept override;
ASMJIT_API Error onReinit(CodeHolder& code) noexcept override;
ASMJIT_API Error on_attach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_detach(CodeHolder& code) noexcept override;
ASMJIT_API Error on_reinit(CodeHolder& code) noexcept override;
//! \}
};
@@ -277,18 +287,18 @@ public:
//! Compiler that owns this JumpAnnotation.
BaseCompiler* _compiler;
//! Annotation identifier.
uint32_t _annotationId;
//! Vector of label identifiers, see \ref labelIds().
ZoneVector<uint32_t> _labelIds;
uint32_t _annotation_id;
//! Vector of label identifiers, see \ref label_ids().
ArenaVector<uint32_t> _label_ids;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG JumpAnnotation(BaseCompiler* ASMJIT_NONNULL(compiler), uint32_t annotationId) noexcept
ASMJIT_INLINE_NODEBUG JumpAnnotation(BaseCompiler* ASMJIT_NONNULL(compiler), uint32_t annotation_id) noexcept
: _compiler(compiler),
_annotationId(annotationId) {}
_annotation_id(annotation_id) {}
//! \}
@@ -301,19 +311,19 @@ public:
//! Returns the annotation id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t annotationId() const noexcept { return _annotationId; }
ASMJIT_INLINE_NODEBUG uint32_t annotation_id() const noexcept { return _annotation_id; }
//! Returns the label identifiers of all targets of the jump.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ZoneVector<uint32_t>& labelIds() const noexcept { return _labelIds; }
ASMJIT_INLINE_NODEBUG Span<uint32_t> label_ids() const noexcept { return _label_ids.as_span(); }
//! Tests whether the given `label` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabel(const Label& label) const noexcept { return hasLabelId(label.id()); }
ASMJIT_INLINE_NODEBUG bool has_label(const Label& label) const noexcept { return has_label_id(label.id()); }
//! Tests whether the given `labelId` is a target of this JumpAnnotation.
//! Tests whether the given `label_id` is a target of this JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLabelId(uint32_t labelId) const noexcept { return _labelIds.contains(labelId); }
ASMJIT_INLINE_NODEBUG bool has_label_id(uint32_t label_id) const noexcept { return _label_ids.contains(label_id); }
//! \}
@@ -321,9 +331,9 @@ public:
//! \{
//! Adds the `label` to the list of targets of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG Error addLabel(const Label& label) noexcept { return addLabelId(label.id()); }
//! Adds the `labelId` to the list of targets of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG Error addLabelId(uint32_t labelId) noexcept { return _labelIds.append(&_compiler->_allocator, labelId); }
ASMJIT_INLINE_NODEBUG Error add_label(const Label& label) noexcept { return add_label_id(label.id()); }
//! Adds the `label_id` to the list of targets of this JumpAnnotation.
ASMJIT_INLINE_NODEBUG Error add_label_id(uint32_t label_id) noexcept { return _label_ids.append(_compiler->_builder_arena, label_id); }
//! \}
};
@@ -347,10 +357,10 @@ public:
//! \name Construction & Destruction
//! \{
inline JumpNode(InstId instId, InstOptions options, uint32_t opCount, JumpAnnotation* annotation) noexcept
: InstNodeWithOperands(instId, options, opCount),
inline JumpNode(InstId inst_id, InstOptions options, uint32_t op_count, JumpAnnotation* annotation) noexcept
: InstNodeWithOperands(inst_id, options, op_count),
_annotation(annotation) {
_setType(NodeType::kJump);
_set_type(NodeType::kJump);
}
//! \}
@@ -360,14 +370,14 @@ public:
//! Tests whether this JumpNode has associated a \ref JumpAnnotation.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasAnnotation() const noexcept { return _annotation != nullptr; }
ASMJIT_INLINE_NODEBUG bool has_annotation() const noexcept { return _annotation != nullptr; }
//! Returns the \ref JumpAnnotation associated with this jump, or `nullptr`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG JumpAnnotation* annotation() const noexcept { return _annotation; }
//! Sets the \ref JumpAnnotation associated with this jump to `annotation`.
ASMJIT_INLINE_NODEBUG void setAnnotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
ASMJIT_INLINE_NODEBUG void set_annotation(JumpAnnotation* annotation) noexcept { _annotation = annotation; }
//! \}
};
@@ -379,17 +389,17 @@ public:
//! - Function entry, \ref FuncNode acts as a label, so the entry is implicit. To get the entry, simply use
//! \ref FuncNode::label(), which is the same as \ref LabelNode::label().
//!
//! - Function exit, which is represented by \ref FuncNode::exitNode(). A helper function
//! \ref FuncNode::exitLabel() exists and returns an exit label instead of node.
//! - Function exit, which is represented by \ref FuncNode::exit_node(). A helper function
//! \ref FuncNode::exit_label() exists and returns an exit label instead of node.
//!
//! - Function \ref FuncNode::endNode() sentinel. This node marks the end of a function - there should be no
//! - Function \ref FuncNode::end_node() sentinel. This node marks the end of a function - there should be no
//! code that belongs to the function after this node, but the Compiler doesn't enforce that at the moment.
//!
//! - Function detail, see \ref FuncNode::detail().
//!
//! - Function frame, see \ref FuncNode::frame().
//!
//! - Function arguments mapped to virtual registers, see \ref FuncNode::argPacks().
//! - Function arguments mapped to virtual registers, see \ref FuncNode::arg_packs().
//!
//! In a node list, the function and its body looks like the following:
//!
@@ -408,8 +418,8 @@ public:
//! [...] - Anything after the function.
//! \endcode
//!
//! When a function is added to the instruction stream by \ref BaseCompiler::addFunc() it actually inserts 3 nodes
//! (FuncNode, ExitLabel, and FuncEnd) and sets the current cursor to be FuncNode. When \ref BaseCompiler::endFunc()
//! When a function is added to the instruction stream by \ref BaseCompiler::add_func() it actually inserts 3 nodes
//! (FuncNode, ExitLabel, and FuncEnd) and sets the current cursor to be FuncNode. When \ref BaseCompiler::end_func()
//! is called, the cursor is set to FuncEnd. This guarantees that the user can use ExitLabel as a marker after which
//! additional code or data can be placed, which is a common practice.
class FuncNode : public LabelNode {
@@ -420,24 +430,25 @@ public:
struct ArgPack {
RegOnly _data[Globals::kMaxValuePack];
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
ASMJIT_INLINE void reset() noexcept {
for (RegOnly& v : _data) {
v.reset();
}
}
inline RegOnly& operator[](size_t valueIndex) noexcept { return _data[valueIndex]; }
inline const RegOnly& operator[](size_t valueIndex) const noexcept { return _data[valueIndex]; }
ASMJIT_INLINE RegOnly& operator[](size_t value_index) noexcept { return _data[value_index]; }
ASMJIT_INLINE const RegOnly& operator[](size_t value_index) const noexcept { return _data[value_index]; }
};
//! \name Members
//! \{
//! Function detail.
FuncDetail _funcDetail;
FuncDetail _func_detail;
//! Function frame.
FuncFrame _frame;
//! Function exit label.
LabelNode* _exitNode;
LabelNode* _exit_node;
//! Function end (sentinel).
SentinelNode* _end;
//! Argument packs.
@@ -450,15 +461,15 @@ public:
//! Creates a new `FuncNode` instance.
//!
//! Always use `BaseCompiler::addFunc()` to create a new `FuncNode`.
inline explicit FuncNode(uint32_t labelId = Globals::kInvalidId) noexcept
: LabelNode(labelId),
_funcDetail(),
//! Always use `BaseCompiler::add_func()` to create a new `FuncNode`.
inline explicit FuncNode(uint32_t label_id = Globals::kInvalidId) noexcept
: LabelNode(label_id),
_func_detail(),
_frame(),
_exitNode(nullptr),
_exit_node(nullptr),
_end(nullptr),
_args(nullptr) {
_setType(NodeType::kFunc);
_set_type(NodeType::kFunc);
}
//! \}
@@ -468,23 +479,23 @@ public:
//! Returns function exit `LabelNode`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG LabelNode* exitNode() const noexcept { return _exitNode; }
ASMJIT_INLINE_NODEBUG LabelNode* exit_node() const noexcept { return _exit_node; }
//! Returns function exit label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label exitLabel() const noexcept { return _exitNode->label(); }
ASMJIT_INLINE_NODEBUG Label exit_label() const noexcept { return _exit_node->label(); }
//! Returns "End of Func" sentinel node.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SentinelNode* endNode() const noexcept { return _end; }
ASMJIT_INLINE_NODEBUG SentinelNode* end_node() const noexcept { return _end; }
//! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _func_detail; }
//! Returns function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _func_detail; }
//! Returns function frame.
[[nodiscard]]
@@ -499,61 +510,61 @@ public:
ASMJIT_INLINE_NODEBUG FuncAttributes attributes() const noexcept { return _frame.attributes(); }
//! Adds `attrs` to the function attributes.
ASMJIT_INLINE_NODEBUG void addAttributes(FuncAttributes attrs) noexcept { _frame.addAttributes(attrs); }
ASMJIT_INLINE_NODEBUG void add_attributes(FuncAttributes attrs) noexcept { _frame.add_attributes(attrs); }
//! Returns arguments count.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
ASMJIT_INLINE_NODEBUG uint32_t arg_count() const noexcept { return _func_detail.arg_count(); }
//! Returns argument packs.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ArgPack* argPacks() const noexcept { return _args; }
ASMJIT_INLINE_NODEBUG ArgPack* arg_packs() const noexcept { return _args; }
//! Tests whether the function has a return value.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
ASMJIT_INLINE_NODEBUG bool has_ret() const noexcept { return _func_detail.has_ret(); }
//! Returns argument pack at `argIndex`.
//! Returns argument pack at `arg_index`.
[[nodiscard]]
inline ArgPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
inline ArgPack& arg_pack(size_t arg_index) const noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
return _args[arg_index];
}
//! Sets argument at `argIndex`.
inline void setArg(size_t argIndex, const Reg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0].init(vReg);
//! Sets argument at `arg_index`.
inline void set_arg(size_t arg_index, const Reg& virt_reg) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
_args[arg_index][0].init(virt_reg);
}
//! \overload
inline void setArg(size_t argIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][0].init(vReg);
inline void set_arg(size_t arg_index, const RegOnly& virt_reg) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
_args[arg_index][0].init(virt_reg);
}
//! Sets argument at `argIndex` and `valueIndex`.
inline void setArg(size_t argIndex, size_t valueIndex, const Reg& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].init(vReg);
//! Sets argument at `arg_index` and `value_index`.
inline void set_arg(size_t arg_index, size_t value_index, const Reg& virt_reg) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
_args[arg_index][value_index].init(virt_reg);
}
//! \overload
inline void setArg(size_t argIndex, size_t valueIndex, const RegOnly& vReg) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].init(vReg);
inline void set_arg(size_t arg_index, size_t value_index, const RegOnly& virt_reg) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
_args[arg_index][value_index].init(virt_reg);
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex].reset();
//! Resets argument pack at `arg_index`.
inline void reset_arg(size_t arg_index) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
_args[arg_index].reset();
}
//! Resets argument pack at `argIndex`.
inline void resetArg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex].reset();
  //! Resets the value at `value_index` in the argument pack at `arg_index`.
inline void reset_arg(size_t arg_index, size_t value_index) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
_args[arg_index][value_index].reset();
}
//! \}
@@ -570,7 +581,7 @@ public:
//! Creates a new `FuncRetNode` instance.
inline FuncRetNode() noexcept
: InstNodeWithOperands(BaseInst::kIdAbstract, InstOptions::kNone, 0) {
_nodeType = NodeType::kFuncRet;
_node_type = NodeType::kFuncRet;
}
//! \}
@@ -589,23 +600,26 @@ public:
Operand_ _data[Globals::kMaxValuePack];
//! Reset the pack by resetting all operands in the pack.
inline void reset() noexcept {
for (size_t valueIndex = 0; valueIndex < Globals::kMaxValuePack; valueIndex++)
_data[valueIndex].reset();
ASMJIT_INLINE void reset() noexcept {
for (Operand_& op : _data) {
op.reset();
}
}
//! Returns an operand at the given `valueIndex`.
//! Returns an operand at the given `value_index`.
[[nodiscard]]
inline Operand& operator[](size_t valueIndex) noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
ASMJIT_INLINE Operand& operator[](size_t value_index) noexcept {
ASMJIT_ASSERT(value_index < Globals::kMaxValuePack);
return _data[value_index].as<Operand>();
}
//! Returns an operand at the given `valueIndex` (const).
//! Returns an operand at the given `value_index` (const).
[[nodiscard]]
const inline Operand& operator[](size_t valueIndex) const noexcept {
ASMJIT_ASSERT(valueIndex < Globals::kMaxValuePack);
return _data[valueIndex].as<Operand>();
ASMJIT_INLINE const Operand& operator[](size_t value_index) const noexcept {
ASMJIT_ASSERT(value_index < Globals::kMaxValuePack);
return _data[value_index].as<Operand>();
}
};
@@ -613,7 +627,7 @@ public:
//! \{
//! Function detail.
FuncDetail _funcDetail;
FuncDetail _func_detail;
//! Function return value(s).
OperandPack _rets;
//! Function arguments.
@@ -625,14 +639,14 @@ public:
//! \{
//! Creates a new `InvokeNode` instance.
inline InvokeNode(InstId instId, InstOptions options) noexcept
: InstNodeWithOperands(instId, options, 0),
_funcDetail(),
inline InvokeNode(InstId inst_id, InstOptions options) noexcept
: InstNodeWithOperands(inst_id, options, 0),
_func_detail(),
_args(nullptr) {
_setType(NodeType::kInvoke);
_resetOps();
_set_type(NodeType::kInvoke);
_reset_ops();
_rets.reset();
_addFlags(NodeFlags::kIsRemovable);
_add_flags(NodeFlags::kIsRemovable);
}
//! \}
@@ -643,16 +657,16 @@ public:
//! Sets the function signature.
[[nodiscard]]
inline Error init(const FuncSignature& signature, const Environment& environment) noexcept {
return _funcDetail.init(signature, environment);
return _func_detail.init(signature, environment);
}
//! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _funcDetail; }
ASMJIT_INLINE_NODEBUG FuncDetail& detail() noexcept { return _func_detail; }
//! Returns the function detail.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _funcDetail; }
ASMJIT_INLINE_NODEBUG const FuncDetail& detail() const noexcept { return _func_detail; }
//! Returns the target operand.
[[nodiscard]]
@@ -664,81 +678,81 @@ public:
//! Returns the number of function return values.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasRet() const noexcept { return _funcDetail.hasRet(); }
ASMJIT_INLINE_NODEBUG bool has_ret() const noexcept { return _func_detail.has_ret(); }
//! Returns the number of function arguments.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t argCount() const noexcept { return _funcDetail.argCount(); }
ASMJIT_INLINE_NODEBUG uint32_t arg_count() const noexcept { return _func_detail.arg_count(); }
//! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandPack& retPack() noexcept { return _rets; }
ASMJIT_INLINE_NODEBUG OperandPack& ret_pack() noexcept { return _rets; }
//! Returns operand pack representing function return value(s).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OperandPack& retPack() const noexcept { return _rets; }
ASMJIT_INLINE_NODEBUG const OperandPack& ret_pack() const noexcept { return _rets; }
//! Returns the return value at the given `valueIndex`.
//! Returns the return value at the given `value_index`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Operand& ret(size_t valueIndex = 0) noexcept { return _rets[valueIndex]; }
ASMJIT_INLINE_NODEBUG Operand& ret(size_t value_index = 0) noexcept { return _rets[value_index]; }
//! \overload
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Operand& ret(size_t valueIndex = 0) const noexcept { return _rets[valueIndex]; }
ASMJIT_INLINE_NODEBUG const Operand& ret(size_t value_index = 0) const noexcept { return _rets[value_index]; }
//! Returns an operand pack representing the function argument(s) at the given index.
[[nodiscard]]
inline OperandPack& argPack(size_t argIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
inline OperandPack& arg_pack(size_t arg_index) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
return _args[arg_index];
}
//! \overload
[[nodiscard]]
inline const OperandPack& argPack(size_t argIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex];
inline const OperandPack& arg_pack(size_t arg_index) const noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
return _args[arg_index];
}
//! Returns a function argument at the given `argIndex`.
//! Returns a function argument at the given `arg_index`.
[[nodiscard]]
inline Operand& arg(size_t argIndex, size_t valueIndex) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
inline Operand& arg(size_t arg_index, size_t value_index) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
return _args[arg_index][value_index];
}
//! \overload
[[nodiscard]]
inline const Operand& arg(size_t argIndex, size_t valueIndex) const noexcept {
ASMJIT_ASSERT(argIndex < argCount());
return _args[argIndex][valueIndex];
inline const Operand& arg(size_t arg_index, size_t value_index) const noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
return _args[arg_index][value_index];
}
//! Sets the function return value at the given value index to `op`.
inline void _setRet(size_t valueIndex, const Operand_& op) noexcept { _rets[valueIndex] = op; }
inline void _set_ret(size_t value_index, const Operand_& op) noexcept { _rets[value_index] = op; }
//! Sets the function argument at the given argument and value index to `op`.
inline void _setArg(size_t argIndex, size_t valueIndex, const Operand_& op) noexcept {
ASMJIT_ASSERT(argIndex < argCount());
_args[argIndex][valueIndex] = op;
inline void _set_arg(size_t arg_index, size_t value_index, const Operand_& op) noexcept {
ASMJIT_ASSERT(arg_index < arg_count());
_args[arg_index][value_index] = op;
}
//! Sets the function return value at `valueIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setRet(size_t valueIndex, const Reg& reg) noexcept { _setRet(valueIndex, reg); }
//! Sets the function return value at `value_index` to `reg`.
ASMJIT_INLINE_NODEBUG void set_ret(size_t value_index, const Reg& reg) noexcept { _set_ret(value_index, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, const Reg& reg) noexcept { _setArg(argIndex, 0, reg); }
//! Sets the first function argument in a value-pack at `argIndex` to `imm`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, const Imm& imm) noexcept { _setArg(argIndex, 0, imm); }
//! Sets the first function argument in a value-pack at `arg_index` to `reg`.
ASMJIT_INLINE_NODEBUG void set_arg(size_t arg_index, const Reg& reg) noexcept { _set_arg(arg_index, 0, reg); }
//! Sets the first function argument in a value-pack at `arg_index` to `imm`.
ASMJIT_INLINE_NODEBUG void set_arg(size_t arg_index, const Imm& imm) noexcept { _set_arg(arg_index, 0, imm); }
//! Sets the function argument at `argIndex` and `valueIndex` to `reg`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, size_t valueIndex, const Reg& reg) noexcept { _setArg(argIndex, valueIndex, reg); }
//! Sets the function argument at `argIndex` and `valueIndex` to `imm`.
ASMJIT_INLINE_NODEBUG void setArg(size_t argIndex, size_t valueIndex, const Imm& imm) noexcept { _setArg(argIndex, valueIndex, imm); }
//! Sets the function argument at `arg_index` and `value_index` to `reg`.
ASMJIT_INLINE_NODEBUG void set_arg(size_t arg_index, size_t value_index, const Reg& reg) noexcept { _set_arg(arg_index, value_index, reg); }
//! Sets the function argument at `arg_index` and `value_index` to `imm`.
ASMJIT_INLINE_NODEBUG void set_arg(size_t arg_index, size_t value_index, const Imm& imm) noexcept { _set_arg(arg_index, value_index, imm); }
//! \}
};
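
A hedged usage sketch of the InvokeNode API above (the exact invoke() overload, the new_gp32() names, and `called_fn` are assumptions for illustration, not verbatim AsmJit API):

// Calling an `int (*)(int, int)` through an x86::Compiler `cc`.
x86::Gp a = cc.new_gp32("a");
x86::Gp b = cc.new_gp32("b");
x86::Gp r = cc.new_gp32("r");

InvokeNode* invoke_node;
cc.invoke(Out(invoke_node),
          imm(uintptr_t(called_fn)),               // call target
          FuncSignature::build<int, int, int>());  // int fn(int, int)

invoke_node->set_arg(0, a);   // slot 0 of argument pack #0
invoke_node->set_arg(1, b);
invoke_node->set_ret(0, r);   // return value lands in `r`
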
//! Function pass extends \ref Pass with \ref FuncPass::runOnFunction().
//! Function pass extends \ref Pass with \ref FuncPass::run_on_function().
class ASMJIT_VIRTAPI FuncPass : public Pass {
public:
ASMJIT_NONCOPYABLE(FuncPass)
@@ -747,7 +761,7 @@ public:
//! \name Construction & Destruction
//! \{
ASMJIT_API FuncPass(const char* name) noexcept;
ASMJIT_API FuncPass(BaseCompiler& cc, const char* name) noexcept;
//! \}
@@ -756,18 +770,18 @@ public:
//! Returns the associated `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseCompiler* cc() const noexcept { return static_cast<BaseCompiler*>(_cb); }
ASMJIT_INLINE_NODEBUG BaseCompiler& cc() const noexcept { return static_cast<BaseCompiler&>(_cb); }
//! \}
//! \name Pass Interface
//! \{
//! Calls `runOnFunction()` on each `FuncNode` node found.
ASMJIT_API Error run(Zone* zone, Logger* logger) override;
//! Calls `run_on_function()` on each `FuncNode` node found.
ASMJIT_API Error run(Arena& arena, Logger* logger) override;
//! Called once per `FuncNode`.
ASMJIT_API virtual Error runOnFunction(Zone* zone, Logger* logger, FuncNode* func);
ASMJIT_API virtual Error run_on_function(Arena& arena, Logger* logger, FuncNode* func);
//! \}
};


@@ -7,9 +7,10 @@
#define ASMJIT_CORE_COMPILERDEFS_H_INCLUDED
#include "../core/api-config.h"
#include "../core/arenastring.h"
#include "../core/operand.h"
#include "../core/support.h"
#include "../core/type.h"
#include "../core/zonestring.h"
ASMJIT_BEGIN_NAMESPACE
@@ -18,77 +19,114 @@ class RAWorkReg;
//! \addtogroup asmjit_compiler
//! \{
//! Flags associated with a virtual register \ref VirtReg.
enum class VirtRegFlags : uint8_t {
kNone = 0x00u,
//! True if this is a fixed register, never reallocated.
kIsFixed = 0x01u,
//! True if the virtual register is only used as a stack area (never accessed as a register). A stack area is
//! allocated via \ref BaseCompiler::_new_stack() or via architecture-dependent compilers such as \ref x86::Compiler::new_stack().
kIsStackArea = 0x02u,
//! True if the virtual register has a stack slot.
//!
//! Stack slots are assigned by the register allocator - so initially when a \ref VirtReg is created this flag would
//! not be set. When a virtual register is spilled, stack slot is automatically created for the register and the
//! \ref VirtReg::_stack_offset member is updated. Stack areas will always have associated stack slot during register
//! allocation.
kHasStackSlot = 0x04u,
//! Virtual register `log2(alignment)` mask (for spilling) (3 bits in flags).
//!
//! \note For space purposes the alignment is stored as log2(alignment). So the alignment is `1 << log2(alignment)`.
kAlignmentLog2Mask = 0xE0u
};
ASMJIT_DEFINE_ENUM_FLAGS(VirtRegFlags)
//! Public virtual register interface, managed by \ref BaseCompiler.
//!
//! When a virtual register is created by \ref BaseCompiler a `VirtReg` is linked with the register operand id it
//! returns. This `VirtReg` can be accessed via \ref BaseCompiler::virtRegByReg() function, which returns a pointer
//! returns. This `VirtReg` can be accessed via \ref BaseCompiler::virt_reg_by_reg() function, which returns a pointer
//! to `VirtReg`.
//!
//! In general, `VirtReg` should only be introspected, as it contains important variables that are needed and
//! managed by AsmJit; however, the `VirtReg` API can also be used to influence register allocation. For example there is
//! a \ref VirtReg::setWeight() function, which could be used to increase a weight of a virtual register (thus make
//! it hard to spill, for example). In addition, there is a \ref VirtReg::setHomeIdHint() function, which can be used
//! a \ref VirtReg::set_weight() function, which can be used to increase the weight of a virtual register (thus
//! making it harder to spill, for example). In addition, there is a \ref VirtReg::set_home_id_hint() function, which can be used
//! to do an initial assignment of a physical register of a virtual register. However, AsmJit could still override
//! the physical register assigned in some special cases.
class VirtReg {
public:
ASMJIT_NONCOPYABLE(VirtReg)
//! \name Constants
//! \{
static constexpr inline uint32_t kAlignmentLog2Mask = uint32_t(VirtRegFlags::kAlignmentLog2Mask);
static constexpr inline uint32_t kAlignmentLog2Shift = Support::ctz_const<kAlignmentLog2Mask>;
static ASMJIT_INLINE_CONSTEXPR VirtRegFlags _flags_from_alignment_log2(uint32_t alignment_log2) noexcept {
return VirtRegFlags(alignment_log2 << kAlignmentLog2Shift);
}
static ASMJIT_INLINE_CONSTEXPR uint32_t _alignment_log2_from_flags(VirtRegFlags flags) noexcept {
return uint32_t(flags) >> kAlignmentLog2Shift;
}
//! \}
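
To make the bit packing concrete, a small hedged sketch derived from the constants above:

// A 16-byte spill alignment is stored as log2(16) == 4 in the top three
// bits of VirtRegFlags (mask 0xE0, shift 5).
VirtRegFlags f = VirtReg::_flags_from_alignment_log2(4);  // 4 << 5 == 0x80
uint32_t log2 = VirtReg::_alignment_log2_from_flags(f);   // back to 4
uint32_t alignment = 1u << log2;                          // 16 bytes
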
//! \name Members
//! \{
//! Virtual register signature.
OperandSignature _signature {};
//! Virtual register id.
uint32_t _id = 0;
//! Virtual register size (can be smaller than `_signature._size`).
uint32_t _virtSize = 0;
//! Virtual register alignment (for spilling).
uint8_t _alignment = 0;
//! Type-id.
TypeId _typeId = TypeId::kVoid;
//! Virtual register weight for alloc/spill decisions.
uint8_t _weight = 1;
//! True if this is a fixed register, never reallocated.
uint8_t _isFixed : 1;
//! True if the virtual register is only used as a stack (never accessed as register).
uint8_t _isStack : 1;
//! True if this virtual register has assigned stack offset (can be only valid after register allocation pass).
uint8_t _hasStackSlot : 1;
uint8_t _reservedBits : 5;
//! Virtual register size (can be smaller than a real register size if only a part of the register is used).
uint32_t _virt_size = 0;
//! Virtual register type.
RegType _reg_type = RegType::kNone;
//! Virtual register flags.
VirtRegFlags _reg_flags = VirtRegFlags::kNone;
//! Virtual register weight.
//!
//! Weight is used for alloc/spill decisions. Higher weight means a higher priority to keep the virtual
//! register always allocated as a physical register. The default weight is zero, which means standard
//! weight (no weight is added to the initial priority, which is calculated based on the number of uses
//! divided by the sum of widths of all live spans).
uint8_t _weight = 0;
//! Type id.
TypeId _type_id = TypeId::kVoid;
//! Home register hint for the register allocator (initially unassigned).
uint8_t _homeIdHint = Reg::kIdBad;
uint8_t _home_id_hint = Reg::kIdBad;
//! Stack offset assigned by the register allocator relative to stack pointer (can be negative as well).
int32_t _stackOffset = 0;
//! Reserved for future use (padding).
uint32_t _reservedU32 = 0;
int32_t _stack_offset = 0;
//! Virtual register name (either empty or user provided).
ZoneString<16> _name {};
ArenaString<16> _name {};
// The following members are used exclusively by RAPass. They are initialized to null pointers when the VirtReg
// is created and then changed during RAPass execution. RAPass sets them back to null before it returns.
//! Reference to `RAWorkReg`, used during register allocation.
RAWorkReg* _workReg = nullptr;
RAWorkReg* _work_reg = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_INLINE_NODEBUG VirtReg(OperandSignature signature, uint32_t id, uint32_t virtSize, uint32_t alignment, TypeId typeId) noexcept
: _signature(signature),
_id(id),
_virtSize(virtSize),
_alignment(uint8_t(alignment)),
_typeId(typeId),
_isFixed(0),
_isStack(0),
_hasStackSlot(0),
_reservedBits(0) {}
ASMJIT_INLINE_NODEBUG VirtReg(RegType reg_type, VirtRegFlags reg_flags, uint32_t id, uint32_t virt_size, TypeId type_id) noexcept
: _id(id),
_virt_size(virt_size),
_reg_type(reg_type),
_reg_flags(reg_flags),
_type_id(type_id) {}
//! \}
@@ -99,116 +137,104 @@ public:
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t id() const noexcept { return _id; }
//! Returns the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t nameSize() const noexcept { return _name.size(); }
//! Returns a register signature of this virtual register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature signature() const noexcept { return _signature; }
//! Returns a virtual register type (maps to the physical register type as well).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegType type() const noexcept { return _signature.regType(); }
ASMJIT_INLINE_NODEBUG RegType reg_type() const noexcept { return _reg_type; }
//! Returns a virtual register group (maps to the physical register group as well).
//! Returns the virtual register flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegGroup group() const noexcept { return _signature.regGroup(); }
//! Returns a real size of the register this virtual register maps to.
//!
//! For example if this is a 128-bit SIMD register used for a scalar single precision floating point value then
//! its virtSize would be 4, however, the `regSize` would still say 16 (128-bits), because it's the smallest size
//! of that register type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regSize() const noexcept { return _signature.size(); }
ASMJIT_INLINE_NODEBUG VirtRegFlags reg_flags() const noexcept { return _reg_flags; }
//! Returns the virtual register size.
//!
//! The virtual register size describes how many bytes the virtual register needs to store its content. It can be
//! smaller than the physical register size, see `regSize()`.
//! smaller than the physical register size, see `register_size()`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t virtSize() const noexcept { return _virtSize; }
ASMJIT_INLINE_NODEBUG uint32_t virt_size() const noexcept { return _virt_size; }
//! Returns the virtual register alignment.
//! Returns the virtual register alignment required for memory operations (load/spill).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return _alignment; }
ASMJIT_INLINE_NODEBUG uint32_t alignment() const noexcept { return 1u << _alignment_log2_from_flags(_reg_flags); }
//! Returns the virtual register type id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG TypeId typeId() const noexcept { return _typeId; }
ASMJIT_INLINE_NODEBUG TypeId type_id() const noexcept { return _type_id; }
//! Returns the virtual register weight - the register allocator can use it as an explicit hint for alloc/spill
//! decisions.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t weight() const noexcept { return _weight; }
//! Sets the virtual register weight (0 to 255) - the register allocator can use it as explicit hint for
//! alloc/spill decisions and initial bin-packing.
ASMJIT_INLINE_NODEBUG void setWeight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
//! Sets the virtual register weight (0 to 255) - the register allocator can use it as an explicit hint for alloc/spill
//! decisions and initial bin-packing.
ASMJIT_INLINE_NODEBUG void set_weight(uint32_t weight) noexcept { _weight = uint8_t(weight); }
//! Returns whether the virtual register is always allocated to a fixed physical register (and never reallocated).
//!
//! \note This is only used for special purposes and it's mostly internal.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFixed() const noexcept { return bool(_isFixed); }
ASMJIT_INLINE_NODEBUG bool is_fixed() const noexcept { return Support::test(_reg_flags, VirtRegFlags::kIsFixed); }
//! Tests whether the virtual register is in fact a stack area that only uses the virtual register id.
//!
//! \note It's an error if a stack area is accessed as a register.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isStack() const noexcept { return bool(_isStack); }
ASMJIT_INLINE_NODEBUG bool is_stack_area() const noexcept { return Support::test(_reg_flags, VirtRegFlags::kIsStackArea); }
//! Tests whether this virtual register (or stack area) has a stack offset assigned.
//!
//! If this is a virtual register that was never allocated on the stack, this returns false; if it's a virtual
//! register that was spilled or an explicitly allocated stack area, this returns true.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasStackSlot() const noexcept { return bool(_hasStackSlot); }
ASMJIT_INLINE_NODEBUG bool has_stack_slot() const noexcept { return Support::test(_reg_flags, VirtRegFlags::kHasStackSlot); }
//! Assigns a stack offset of this virtual register to `stackOffset` and sets `_hasStackSlot` to true.
ASMJIT_INLINE_NODEBUG void assignStackSlot(int32_t stackOffset) noexcept {
_hasStackSlot = 1;
_stackOffset = stackOffset;
//! Assigns a stack offset of this virtual register to `stack_offset` and adds `VirtRegFlags::kHasStackSlot` flag.
ASMJIT_INLINE_NODEBUG void assign_stack_slot(int32_t stack_offset) noexcept {
_reg_flags |= VirtRegFlags::kHasStackSlot;
_stack_offset = stack_offset;
}
//! Tests whether this virtual register has a physical register assigned as a hint to the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasHomeIdHint() const noexcept { return _homeIdHint != Reg::kIdBad; }
ASMJIT_INLINE_NODEBUG bool has_home_id_hint() const noexcept { return _home_id_hint != Reg::kIdBad; }
//! Returns a physical register hint, which will be used by the register allocator.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t homeIdHint() const noexcept { return _homeIdHint; }
ASMJIT_INLINE_NODEBUG uint32_t home_id_hint() const noexcept { return _home_id_hint; }
//! Assigns a physical register hint, which will be used by the register allocator.
ASMJIT_INLINE_NODEBUG void setHomeIdHint(uint32_t homeId) noexcept { _homeIdHint = uint8_t(homeId); }
ASMJIT_INLINE_NODEBUG void set_home_id_hint(uint32_t home_id) noexcept { _home_id_hint = uint8_t(home_id); }
//! Resets a physical register hint.
ASMJIT_INLINE_NODEBUG void resetHomeIdHint() noexcept { _homeIdHint = Reg::kIdBad; }
ASMJIT_INLINE_NODEBUG void reset_home_id_hint() noexcept { _home_id_hint = Reg::kIdBad; }
//! Returns a stack offset associated with a virtual register or explicit stack allocation.
//!
//! \note Always verify that the stack offset has been assigned by calling \ref hasStackSlot(). The return
//! \note Always verify that the stack offset has been assigned by calling \ref has_stack_slot(). The return
//! value will be zero when the stack offset was not assigned.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int32_t stackOffset() const noexcept { return _stackOffset; }
ASMJIT_INLINE_NODEBUG int32_t stack_offset() const noexcept { return _stack_offset; }
//! Returns the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* name() const noexcept { return _name.data(); }
//! Returns the size of the virtual register name.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t name_size() const noexcept { return _name.size(); }
//! Tests whether the virtual register has an associated `RAWorkReg` at the moment.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasWorkReg() const noexcept { return _workReg != nullptr; }
ASMJIT_INLINE_NODEBUG bool has_work_reg() const noexcept { return _work_reg != nullptr; }
//! Returns an associated RAWorkReg with this virtual register (only valid during register allocation).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RAWorkReg* workReg() const noexcept { return _workReg; }
ASMJIT_INLINE_NODEBUG RAWorkReg* work_reg() const noexcept { return _work_reg; }
//! Associates a RAWorkReg with this virtual register (used by register allocator).
ASMJIT_INLINE_NODEBUG void setWorkReg(RAWorkReg* workReg) noexcept { _workReg = workReg; }
ASMJIT_INLINE_NODEBUG void set_work_reg(RAWorkReg* work_reg) noexcept { _work_reg = work_reg; }
//! Reset the RAWorkReg association (used by register allocator).
ASMJIT_INLINE_NODEBUG void resetWorkReg() noexcept { _workReg = nullptr; }
ASMJIT_INLINE_NODEBUG void reset_work_reg() noexcept { _work_reg = nullptr; }
//! \}
};
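
A hedged sketch of the tuning API described above (`cc` is an x86::Compiler and new_gp32() is assumed; virt_reg_by_reg() is the accessor mentioned in the class documentation):

x86::Gp counter = cc.new_gp32("counter");
VirtReg* vreg = cc.virt_reg_by_reg(counter);
vreg->set_weight(255);                   // maximum weight: spill only as a last resort
vreg->set_home_id_hint(x86::Gp::kIdCx);  // prefer ECX as the home register
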


@@ -12,150 +12,152 @@ ASMJIT_BEGIN_NAMESPACE
// ConstPool - Construction & Destruction
// ======================================
ConstPool::ConstPool(Zone* zone) noexcept { reset(zone); }
ConstPool::ConstPool(Arena& arena) noexcept : _arena(arena) {
reset();
}
ConstPool::~ConstPool() noexcept {}
// ConstPool - Reset
// =================
void ConstPool::reset(Zone* zone) noexcept {
_zone = zone;
void ConstPool::reset() noexcept {
size_t data_size = 1;
size_t dataSize = 1;
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].reset();
_tree[i].setDataSize(dataSize);
_tree[i].set_data_size(data_size);
_gaps[i] = nullptr;
dataSize <<= 1;
data_size <<= 1;
}
_gapPool = nullptr;
_gap_pool = nullptr;
_size = 0;
_alignment = 0;
_minItemSize = 0;
_min_item_size = 0;
}
// ConstPool - Operations
// ======================
static inline ConstPool::Gap* ConstPool_allocGap(ConstPool* self) noexcept {
ConstPool::Gap* gap = self->_gapPool;
ConstPool::Gap* gap = self->_gap_pool;
if (!gap) {
return self->_zone->alloc<ConstPool::Gap>();
return self->_arena.alloc_oneshot<ConstPool::Gap>();
}
self->_gapPool = gap->_next;
self->_gap_pool = gap->_next;
return gap;
}
static inline void ConstPool_freeGap(ConstPool* self, ConstPool::Gap* gap) noexcept {
gap->_next = self->_gapPool;
self->_gapPool = gap;
gap->_next = self->_gap_pool;
self->_gap_pool = gap;
}
static void ConstPool_addGap(ConstPool* self, size_t offset, size_t size) noexcept {
ASMJIT_ASSERT(size > 0);
while (size > 0) {
size_t gapIndex;
size_t gapSize;
size_t gap_index;
size_t gap_size;
if (size >= 32 && Support::isAligned<size_t>(offset, 32)) {
gapIndex = ConstPool::kIndex32;
gapSize = 32;
if (size >= 32 && Support::is_aligned<size_t>(offset, 32)) {
gap_index = ConstPool::kIndex32;
gap_size = 32;
}
else if (size >= 16 && Support::isAligned<size_t>(offset, 16)) {
gapIndex = ConstPool::kIndex16;
gapSize = 16;
else if (size >= 16 && Support::is_aligned<size_t>(offset, 16)) {
gap_index = ConstPool::kIndex16;
gap_size = 16;
}
else if (size >= 8 && Support::isAligned<size_t>(offset, 8)) {
gapIndex = ConstPool::kIndex8;
gapSize = 8;
else if (size >= 8 && Support::is_aligned<size_t>(offset, 8)) {
gap_index = ConstPool::kIndex8;
gap_size = 8;
}
else if (size >= 4 && Support::isAligned<size_t>(offset, 4)) {
gapIndex = ConstPool::kIndex4;
gapSize = 4;
else if (size >= 4 && Support::is_aligned<size_t>(offset, 4)) {
gap_index = ConstPool::kIndex4;
gap_size = 4;
}
else if (size >= 2 && Support::isAligned<size_t>(offset, 2)) {
gapIndex = ConstPool::kIndex2;
gapSize = 2;
else if (size >= 2 && Support::is_aligned<size_t>(offset, 2)) {
gap_index = ConstPool::kIndex2;
gap_size = 2;
}
else {
gapIndex = ConstPool::kIndex1;
gapSize = 1;
gap_index = ConstPool::kIndex1;
gap_size = 1;
}
// We don't have to check for errors here - if this failed, nothing really happened (just the gap won't be
// visible) and it will fail again at place where the same check would generate `kErrorOutOfMemory` error.
// visible) and it will fail again at the place where the same check would generate \ref Error::kOutOfMemory error.
ConstPool::Gap* gap = ConstPool_allocGap(self);
if (!gap) {
return;
}
gap->_next = self->_gaps[gapIndex];
self->_gaps[gapIndex] = gap;
gap->_next = self->_gaps[gap_index];
self->_gaps[gap_index] = gap;
gap->_offset = offset;
gap->_size = gapSize;
gap->_size = gap_size;
offset += gapSize;
size -= gapSize;
offset += gap_size;
size -= gap_size;
}
}
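
The cascade above always picks the largest bucket that both fits the remaining size and keeps the offset aligned. A hedged, standalone equivalent in plain C++20 (not AsmJit code):

#include <bit>      // std::bit_floor
#include <cstddef>

// Largest of {32, 16, 8, 4, 2, 1} that fits `size` and keeps `offset` aligned.
static size_t gap_bucket_size(size_t offset, size_t size) {
  size_t by_size  = std::bit_floor(size);                    // size must be >= 1
  size_t by_align = offset ? (offset & (~offset + 1)) : 32;  // lowest set bit
  size_t bucket   = by_size < by_align ? by_size : by_align;
  return bucket < 32 ? bucket : 32;
}
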
Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept {
Error ConstPool::add(const void* data, size_t size, Out<size_t> offset_out) noexcept {
constexpr size_t kMaxSize = size_t(1) << (kIndexCount - 1);
// Avoid sizes outside of the supported range.
if (ASMJIT_UNLIKELY(size == 0 || size > kMaxSize)) {
return DebugUtils::errored(kErrorInvalidArgument);
return make_error(Error::kInvalidArgument);
}
size_t treeIndex = Support::ctz(size);
size_t tree_index = Support::ctz(size);
// Avoid sizes, which are not aligned to power of 2.
if (ASMJIT_UNLIKELY((size_t(1) << treeIndex) != size)) {
return DebugUtils::errored(kErrorInvalidArgument);
if (ASMJIT_UNLIKELY((size_t(1) << tree_index) != size)) {
return make_error(Error::kInvalidArgument);
}
ConstPool::Node* node = _tree[treeIndex].get(data);
ConstPool::Node* node = _tree[tree_index].get(data);
if (node) {
dstOffset = node->_offset;
return kErrorOk;
offset_out = node->_offset;
return Error::kOk;
}
// Before incrementing the current offset, check whether there is a gap that can be used for the requested data.
size_t offset = ~size_t(0);
size_t gapIndex = treeIndex;
size_t gap_index = tree_index;
while (gapIndex != kIndexCount - 1) {
ConstPool::Gap* gap = _gaps[treeIndex];
while (gap_index != kIndexCount - 1) {
ConstPool::Gap* gap = _gaps[tree_index];
// Check if there is a gap.
if (gap) {
size_t gapOffset = gap->_offset;
size_t gapSize = gap->_size;
size_t gap_offset = gap->_offset;
size_t gap_size = gap->_size;
// Destroy the gap for now.
_gaps[treeIndex] = gap->_next;
_gaps[tree_index] = gap->_next;
ConstPool_freeGap(this, gap);
offset = gapOffset;
ASMJIT_ASSERT(Support::isAligned<size_t>(offset, size));
offset = gap_offset;
ASMJIT_ASSERT(Support::is_aligned<size_t>(offset, size));
gapSize -= size;
if (gapSize > 0) {
ConstPool_addGap(this, gapOffset, gapSize);
gap_size -= size;
if (gap_size > 0) {
ConstPool_addGap(this, gap_offset, gap_size);
}
}
gapIndex++;
gap_index++;
}
if (offset == ~size_t(0)) {
// Get how many bytes have to be skipped so the address is aligned according to the 'size'.
size_t diff = Support::alignUpDiff<size_t>(_size, size);
size_t diff = Support::align_up_diff<size_t>(_size, size);
if (diff != 0) {
ConstPool_addGap(this, _size, diff);
@@ -167,60 +169,60 @@ Error ConstPool::add(const void* data, size_t size, size_t& dstOffset) noexcept
}
// Add the initial node to the right index.
node = ConstPool::Tree::_newNode(_zone, data, size, offset, false);
node = ConstPool::Tree::new_node_t(_arena, data, size, offset, false);
if (ASMJIT_UNLIKELY(!node)) {
return DebugUtils::errored(kErrorOutOfMemory);
return make_error(Error::kOutOfMemory);
}
_tree[treeIndex].insert(node);
_tree[tree_index].insert(node);
_alignment = Support::max<size_t>(_alignment, size);
dstOffset = offset;
offset_out = offset;
// Now create a bunch of shared constants that are based on the data pattern. We stop at size 4, as it
// probably doesn't make sense to split constants down to 1 byte.
size_t pCount = 1;
size_t smallerSize = size;
size_t p_count = 1;
size_t smaller_size = size;
while (smallerSize > 4) {
pCount <<= 1;
smallerSize >>= 1;
while (smaller_size > 4) {
p_count <<= 1;
smaller_size >>= 1;
ASMJIT_ASSERT(treeIndex != 0);
treeIndex--;
ASMJIT_ASSERT(tree_index != 0);
tree_index--;
const uint8_t* pData = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < pCount; i++, pData += smallerSize) {
node = _tree[treeIndex].get(pData);
const uint8_t* data_ptr = static_cast<const uint8_t*>(data);
for (size_t i = 0; i < p_count; i++, data_ptr += smaller_size) {
node = _tree[tree_index].get(data_ptr);
if (node) {
continue;
}
node = ConstPool::Tree::_newNode(_zone, pData, smallerSize, offset + (i * smallerSize), true);
_tree[treeIndex].insert(node);
node = ConstPool::Tree::new_node_t(_arena, data_ptr, smaller_size, offset + (i * smaller_size), true);
_tree[tree_index].insert(node);
}
}
_minItemSize = !_minItemSize ? size : Support::min(_minItemSize, size);
return kErrorOk;
_min_item_size = !_min_item_size ? size : Support::min(_min_item_size, size);
return Error::kOk;
}
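
A hedged usage sketch of the behavior implemented above - deduplication plus pattern sharing (offsets assume a little-endian target, matching the endian note in the tests below):

Arena arena(4096);
ConstPool pool(arena);
size_t off;

uint64_t c64 = 0x1122334455667788u;
pool.add(&c64, 8, Out(off));   // off == 0, pool.size() == 8

uint32_t lo = 0x55667788u;     // low half of c64 on little-endian
pool.add(&lo, 4, Out(off));    // off == 0 again - shared node, no growth

uint32_t hi = 0x11223344u;     // high half of c64
pool.add(&hi, 4, Out(off));    // off == 4, pool still holds 8 bytes
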
// ConstPool - Fill
// ================
struct ConstPoolFill {
inline ConstPoolFill(uint8_t* dst, size_t dataSize) noexcept :
inline ConstPoolFill(uint8_t* dst, size_t data_size) noexcept :
_dst(dst),
_dataSize(dataSize) {}
_data_size(data_size) {}
inline void operator()(const ConstPool::Node* node) noexcept {
if (!node->_shared) {
memcpy(_dst + node->_offset, node->data(), _dataSize);
memcpy(_dst + node->_offset, node->data(), _data_size);
}
}
uint8_t* _dst;
size_t _dataSize;
size_t _data_size;
};
void ConstPool::fill(void* dst) const noexcept {
@@ -229,8 +231,8 @@ void ConstPool::fill(void* dst) const noexcept {
ConstPoolFill filler(static_cast<uint8_t*>(dst), 1);
for (size_t i = 0; i < ASMJIT_ARRAY_SIZE(_tree); i++) {
_tree[i].forEach(filler);
filler._dataSize <<= 1;
_tree[i].for_each(filler);
filler._data_size <<= 1;
}
}
@@ -239,27 +241,27 @@ void ConstPool::fill(void* dst) const noexcept {
#if defined(ASMJIT_TEST)
UNIT(const_pool) {
Zone zone(32u * 1024u);
ConstPool pool(&zone);
Arena arena(32u * 1024u);
ConstPool pool(arena);
uint32_t i;
uint32_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 1000000;
uint32_t kCount = BrokenAPI::has_arg("--quick") ? 1000 : 1000000;
INFO("Adding %u constants to the pool", kCount);
{
size_t prevOffset;
size_t curOffset;
size_t prev_offset;
size_t cur_offset;
uint64_t c = 0x0101010101010101u;
EXPECT_EQ(pool.add(&c, 8, prevOffset), kErrorOk);
EXPECT_EQ(prevOffset, 0u);
EXPECT_EQ(pool.add(&c, 8, Out(prev_offset)), Error::kOk);
EXPECT_EQ(prev_offset, 0u);
for (i = 1; i < kCount; i++) {
c++;
EXPECT_EQ(pool.add(&c, 8, curOffset), kErrorOk);
EXPECT_EQ(prevOffset + 8, curOffset);
EXPECT_EQ(pool.add(&c, 8, Out(cur_offset)), Error::kOk);
EXPECT_EQ(prev_offset + 8, cur_offset);
EXPECT_EQ(pool.size(), (i + 1) * 8);
prevOffset = curOffset;
prev_offset = cur_offset;
}
EXPECT_EQ(pool.alignment(), 8u);
@@ -271,7 +273,7 @@ UNIT(const_pool) {
for (i = 0; i < kCount; i++) {
size_t offset;
EXPECT_EQ(pool.add(&c, 8, offset), kErrorOk);
EXPECT_EQ(pool.add(&c, 8, Out(offset)), Error::kOk);
EXPECT_EQ(offset, i * 8);
c++;
}
@@ -282,16 +284,16 @@ UNIT(const_pool) {
uint32_t c = 0x01010101u;
size_t offset;
EXPECT_EQ(pool.add(&c, 4, offset), kErrorOk);
EXPECT_EQ(pool.add(&c, 4, Out(offset)), Error::kOk);
EXPECT_EQ(offset, 0u);
// NOTE: We have to adjust the offset to successfully test this on big endian architectures.
size_t baseOffset = size_t(ASMJIT_ARCH_BE ? 4 : 0);
size_t base_offset = size_t(Support::ByteOrder::kNative == Support::ByteOrder::kBE ? 4 : 0);
for (i = 1; i < kCount; i++) {
c++;
EXPECT_EQ(pool.add(&c, 4, offset), kErrorOk);
EXPECT_EQ(offset, baseOffset + i * 8);
EXPECT_EQ(pool.add(&c, 4, Out(offset)), Error::kOk);
EXPECT_EQ(offset, base_offset + i * 8);
}
}
@@ -300,7 +302,7 @@ UNIT(const_pool) {
uint16_t c = 0xFFFF;
size_t offset;
EXPECT_EQ(pool.add(&c, 2, offset), kErrorOk);
EXPECT_EQ(pool.add(&c, 2, Out(offset)), Error::kOk);
EXPECT_EQ(offset, kCount * 8);
EXPECT_EQ(pool.alignment(), 8u);
}
@@ -310,7 +312,7 @@ UNIT(const_pool) {
uint64_t c = 0xFFFFFFFFFFFFFFFFu;
size_t offset;
EXPECT_EQ(pool.add(&c, 8, offset), kErrorOk);
EXPECT_EQ(pool.add(&c, 8, Out(offset)), Error::kOk);
EXPECT_EQ(offset, kCount * 8 + 8u);
}
@@ -319,15 +321,15 @@ UNIT(const_pool) {
uint16_t c = 0xFFFE;
size_t offset;
EXPECT_EQ(pool.add(&c, 2, offset), kErrorOk);
EXPECT_EQ(pool.add(&c, 2, Out(offset)), Error::kOk);
EXPECT_EQ(offset, kCount * 8 + 2);
EXPECT_EQ(pool.alignment(), 8u);
}
INFO("Checking reset functionality");
{
pool.reset(&zone);
zone.reset();
pool.reset();
arena.reset();
EXPECT_EQ(pool.size(), 0u);
EXPECT_EQ(pool.alignment(), 0u);
@@ -338,27 +340,27 @@ UNIT(const_pool) {
uint8_t bytes[32] = { 0 };
size_t offset;
pool.add(bytes, 1, offset);
pool.add(bytes, 1, Out(offset));
EXPECT_EQ(pool.size(), 1u);
EXPECT_EQ(pool.alignment(), 1u);
EXPECT_EQ(offset, 0u);
pool.add(bytes, 2, offset);
pool.add(bytes, 2, Out(offset));
EXPECT_EQ(pool.size(), 4u);
EXPECT_EQ(pool.alignment(), 2u);
EXPECT_EQ(offset, 2u);
pool.add(bytes, 4, offset);
pool.add(bytes, 4, Out(offset));
EXPECT_EQ(pool.size(), 8u);
EXPECT_EQ(pool.alignment(), 4u);
EXPECT_EQ(offset, 4u);
pool.add(bytes, 4, offset);
pool.add(bytes, 4, Out(offset));
EXPECT_EQ(pool.size(), 8u);
EXPECT_EQ(pool.alignment(), 4u);
EXPECT_EQ(offset, 4u);
pool.add(bytes, 32, offset);
pool.add(bytes, 32, Out(offset));
EXPECT_EQ(pool.size(), 64u);
EXPECT_EQ(pool.alignment(), 32u);
EXPECT_EQ(offset, 32u);


@@ -6,9 +6,9 @@
#ifndef ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#define ASMJIT_CORE_CONSTPOOL_H_INCLUDED
#include "../core/arena.h"
#include "../core/arenatree.h"
#include "../core/support.h"
#include "../core/zone.h"
#include "../core/zonetree.h"
ASMJIT_BEGIN_NAMESPACE
@@ -48,7 +48,7 @@ public:
kIndexCount = 7
};
//! Zone-allocated const-pool gap created by two differently aligned constants.
//! Arena-allocated const-pool gap created by two differently aligned constants.
struct Gap {
//! Pointer to the next gap
Gap* _next;
@@ -58,8 +58,8 @@ public:
size_t _size;
};
//! Zone-allocated const-pool node.
class Node : public ZoneTreeNodeT<Node> {
//! Arena-allocated const-pool node.
class Node : public ArenaTreeNodeT<Node> {
public:
ASMJIT_NONCOPYABLE(Node)
@@ -69,49 +69,49 @@ public:
uint32_t _offset;
ASMJIT_INLINE_NODEBUG Node(size_t offset, bool shared) noexcept
: ZoneTreeNodeT<Node>(),
: ArenaTreeNodeT<Node>(),
_shared(shared),
_offset(uint32_t(offset)) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG void* data() noexcept { return Support::offsetPtr<void>(this, sizeof(*this)); }
ASMJIT_INLINE_NODEBUG void* data() noexcept { return Support::offset_ptr<void>(this, sizeof(*this)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const void* data() const noexcept { return Support::offsetPtr<void>(this, sizeof(*this)); }
ASMJIT_INLINE_NODEBUG const void* data() const noexcept { return Support::offset_ptr<void>(this, sizeof(*this)); }
};
//! Data comparer used internally.
class Compare {
public:
size_t _dataSize;
size_t _data_size;
ASMJIT_INLINE_NODEBUG Compare(size_t dataSize) noexcept
: _dataSize(dataSize) {}
ASMJIT_INLINE_NODEBUG Compare(size_t data_size) noexcept
: _data_size(data_size) {}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const Node& b) const noexcept {
return ::memcmp(a.data(), b.data(), _dataSize);
return ::memcmp(a.data(), b.data(), _data_size);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG int operator()(const Node& a, const void* data) const noexcept {
return ::memcmp(a.data(), data, _dataSize);
return ::memcmp(a.data(), data, _data_size);
}
};
//! Zone-allocated const-pool tree.
//! Arena-allocated const-pool tree.
struct Tree {
//! RB tree.
ZoneTree<Node> _tree;
ArenaTree<Node> _tree;
//! Size of the tree (number of nodes).
size_t _size;
//! Size of the data.
size_t _dataSize;
size_t _data_size;
ASMJIT_INLINE_NODEBUG explicit Tree(size_t dataSize = 0) noexcept
ASMJIT_INLINE_NODEBUG explicit Tree(size_t data_size = 0) noexcept
: _tree(),
_size(0),
_dataSize(dataSize) {}
_data_size(data_size) {}
ASMJIT_INLINE_NODEBUG void reset() noexcept {
_tree.reset();
@@ -119,30 +119,30 @@ public:
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _size == 0; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t size() const noexcept { return _size; }
inline void setDataSize(size_t dataSize) noexcept {
ASMJIT_ASSERT(empty());
_dataSize = dataSize;
inline void set_data_size(size_t data_size) noexcept {
ASMJIT_ASSERT(is_empty());
_data_size = data_size;
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Node* get(const void* data) noexcept {
Compare cmp(_dataSize);
Compare cmp(_data_size);
return _tree.get(data, cmp);
}
ASMJIT_INLINE_NODEBUG void insert(Node* node) noexcept {
Compare cmp(_dataSize);
Compare cmp(_data_size);
_tree.insert(node, cmp);
_size++;
}
template<typename Visitor>
inline void forEach(Visitor& visitor) const noexcept {
inline void for_each(Visitor& visitor) const noexcept {
Node* node = _tree.root();
if (!node) return;
@@ -175,9 +175,8 @@ public:
}
[[nodiscard]]
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
size_t nodeSize = Support::alignUp(sizeof(Node) + size, Globals::kZoneAlignment);
Node* node = zone->alloc<Node>(nodeSize);
static inline Node* new_node_t(Arena& arena, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = arena.alloc_oneshot<Node>(Arena::aligned_size(sizeof(Node) + size));
if (ASMJIT_UNLIKELY(!node)) {
return nullptr;
@@ -194,29 +193,29 @@ public:
//! \name Members
//! \{
//! Zone allocator.
Zone* _zone;
//! Arena.
Arena& _arena;
//! Tree per size.
Tree _tree[kIndexCount];
//! Gaps per size.
Gap* _gaps[kIndexCount];
//! Gaps pool
Gap* _gapPool;
Gap* _gap_pool;
//! Size of the pool (in bytes).
size_t _size;
//! Required pool alignment.
size_t _alignment;
//! Minimum item size in the pool.
size_t _minItemSize;
size_t _min_item_size;
//! \}
//! \name Construction & Destruction
//! \{
//! Creates a new constant pool that would use `zone` as a memory allocator.
ASMJIT_API explicit ConstPool(Zone* zone) noexcept;
//! Creates a new constant pool that would use `arena` as a memory allocator.
ASMJIT_API explicit ConstPool(Arena& arena) noexcept;
//! Destroys this constant pool.
ASMJIT_API ~ConstPool() noexcept;
@@ -225,8 +224,8 @@ public:
//! \name Reset
//! \{
//! Resets this constant pool and its allocator to `zone`.
ASMJIT_API void reset(Zone* zone) noexcept;
//! Resets this constant pool.
ASMJIT_API void reset() noexcept;
//! \}
@@ -235,7 +234,7 @@ public:
//! Tests whether the constant-pool is empty.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept { return _size == 0; }
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept { return _size == 0; }
//! Returns the size of the constant-pool in bytes.
[[nodiscard]]
@@ -247,7 +246,7 @@ public:
//! Returns the minimum size of all items added to the constant pool.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t minItemSize() const noexcept { return _minItemSize; }
ASMJIT_INLINE_NODEBUG size_t min_item_size() const noexcept { return _min_item_size; }
//! \}
@@ -268,7 +267,7 @@ public:
//! The reason is that when combining MMX/SSE/AVX code some patterns are used frequently. However, AsmJit is not
//! able to reallocate a constant that has already been added. For example if you try to add a 4-byte constant and
//! then an 8-byte constant having the same 4-byte pattern as the previous one, two independent slots will be used.
ASMJIT_API Error add(const void* data, size_t size, size_t& dstOffset) noexcept;
ASMJIT_API Error add(const void* data, size_t size, Out<size_t> offset_out) noexcept;
//! Fills the destination with the content of this constant pool.
ASMJIT_API void fill(void* dst) const noexcept;

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -19,46 +19,46 @@ ASMJIT_BEGIN_NAMESPACE
// ===========================
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
static void dumpFuncValue(String& sb, Arch arch, const FuncValue& value) noexcept {
Formatter::formatTypeId(sb, value.typeId());
static void dump_func_value(String& sb, Arch arch, const FuncValue& value) noexcept {
Formatter::format_type_id(sb, value.type_id());
sb.append('@');
if (value.isIndirect()) {
if (value.is_indirect()) {
sb.append('[');
}
if (value.isReg()) {
Formatter::formatRegister(sb, 0, nullptr, arch, value.regType(), value.regId());
if (value.is_reg()) {
Formatter::format_register(sb, 0, nullptr, arch, value.reg_type(), value.reg_id());
}
else if (value.isStack()) {
sb.appendFormat("[%d]", value.stackOffset());
else if (value.is_stack()) {
sb.append_format("[%d]", value.stack_offset());
}
else {
sb.append("<none>");
}
if (value.isIndirect()) {
if (value.is_indirect()) {
sb.append(']');
}
}
static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
static void dump_assignment(String& sb, const FuncArgsContext& ctx) noexcept {
using Var = FuncArgsContext::Var;
Arch arch = ctx.arch();
uint32_t varCount = ctx.varCount();
uint32_t var_count = ctx.var_count();
for (uint32_t i = 0; i < varCount; i++) {
for (uint32_t i = 0; i < var_count; i++) {
const Var& var = ctx.var(i);
const FuncValue& dst = var.out;
const FuncValue& cur = var.cur;
sb.appendFormat("Var%u: ", i);
dumpFuncValue(sb, arch, dst);
sb.append_format("Var%u: ", i);
dump_func_value(sb, arch, dst);
sb.append(" <- ");
dumpFuncValue(sb, arch, cur);
dump_func_value(sb, arch, cur);
if (var.isDone()) {
if (var.is_done()) {
sb.append(" {Done}");
}
@@ -70,25 +70,25 @@ static void dumpAssignment(String& sb, const FuncArgsContext& ctx) noexcept {
// BaseEmitHelper - Abstract
// =========================
Error BaseEmitHelper::emitRegMove(const Operand_& dst_, const Operand_& src_, TypeId typeId, const char* comment) {
DebugUtils::unused(dst_, src_, typeId, comment);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitHelper::emit_reg_move(const Operand_& dst_, const Operand_& src_, TypeId type_id, const char* comment) {
Support::maybe_unused(dst_, src_, type_id, comment);
return make_error(Error::kInvalidState);
}
Error BaseEmitHelper::emitRegSwap(const Reg& a, const Reg& b, const char* comment) {
DebugUtils::unused(a, b, comment);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitHelper::emit_reg_swap(const Reg& a, const Reg& b, const char* comment) {
Support::maybe_unused(a, b, comment);
return make_error(Error::kInvalidState);
}
Error BaseEmitHelper::emitArgMove(const Reg& dst_, TypeId dstTypeId, const Operand_& src_, TypeId srcTypeId, const char* comment) {
DebugUtils::unused(dst_, dstTypeId, src_, srcTypeId, comment);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitHelper::emit_arg_move(const Reg& dst_, TypeId dst_type_id, const Operand_& src_, TypeId src_type_id, const char* comment) {
Support::maybe_unused(dst_, dst_type_id, src_, src_type_id, comment);
return make_error(Error::kInvalidState);
}
// BaseEmitHelper - EmitArgsAssignment
// ===================================
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emit_args_assignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
using Var = FuncArgsContext::Var;
using WorkData = FuncArgsContext::WorkData;
@@ -100,266 +100,266 @@ ASMJIT_FAVOR_SIZE Error BaseEmitHelper::emitArgsAssignment(const FuncFrame& fram
};
Arch arch = frame.arch();
const ArchTraits& archTraits = ArchTraits::byArch(arch);
const ArchTraits& arch_traits = ArchTraits::by_arch(arch);
RAConstraints constraints;
FuncArgsContext ctx;
ASMJIT_PROPAGATE(constraints.init(arch));
ASMJIT_PROPAGATE(ctx.initWorkData(frame, args, &constraints));
ASMJIT_PROPAGATE(ctx.init_work_data(frame, args, &constraints));
#ifdef ASMJIT_DUMP_ARGS_ASSIGNMENT
{
String sb;
dumpAssignment(sb, ctx);
dump_assignment(sb, ctx);
printf("%s\n", sb.data());
}
#endif
auto& workData = ctx._workData;
uint32_t varCount = ctx._varCount;
uint32_t saVarId = ctx._saVarId;
auto& work_data = ctx._work_data;
uint32_t var_count = ctx._var_count;
uint32_t sa_var_id = ctx._sa_var_id;
Reg sp = Reg(_emitter->_gpSignature, archTraits.spRegId());
Reg sp = Reg(_emitter->_gp_signature, arch_traits.sp_reg_id());
Reg sa = sp;
if (frame.hasDynamicAlignment()) {
if (frame.hasPreservedFP()) {
sa.setId(archTraits.fpRegId());
if (frame.has_dynamic_alignment()) {
if (frame.has_preserved_fp()) {
sa.set_id(arch_traits.fp_reg_id());
}
else {
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
sa.set_id(sa_var_id < var_count ? ctx._vars[sa_var_id].cur.reg_id() : frame.sa_reg_id());
}
}
// Register to stack and stack to stack moves must come first, as this is when we have
// the best chance of having as many unassigned registers as possible.
if (ctx._stackDstMask) {
if (ctx._stack_dst_mask) {
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
BaseMem baseStackPtr(sp, 0);
BaseMem base_arg_ptr(sa, int32_t(frame.sa_offset(sa.id())));
BaseMem base_stack_ptr(sp, 0);
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
for (uint32_t var_id = 0; var_id < var_count; var_id++) {
Var& var = ctx._vars[var_id];
if (!var.out.isStack()) {
if (!var.out.is_stack()) {
continue;
}
FuncValue& cur = var.cur;
FuncValue& out = var.out;
ASMJIT_ASSERT(cur.isReg() || cur.isStack());
ASMJIT_ASSERT(cur.is_reg() || cur.is_stack());
Reg reg;
BaseMem dstStackPtr = baseStackPtr.cloneAdjusted(out.stackOffset());
BaseMem srcStackPtr = baseArgPtr.cloneAdjusted(cur.stackOffset());
BaseMem dst_stack_ptr = base_stack_ptr.clone_adjusted(out.stack_offset());
BaseMem src_stack_ptr = base_arg_ptr.clone_adjusted(cur.stack_offset());
if (cur.isIndirect()) {
if (cur.isStack()) {
if (cur.is_indirect()) {
if (cur.is_stack()) {
// TODO: Indirect stack.
return DebugUtils::errored(kErrorInvalidAssignment);
return make_error(Error::kInvalidAssignment);
}
else {
srcStackPtr.setBaseId(cur.regId());
src_stack_ptr.set_base_id(cur.reg_id());
}
}
if (cur.isReg() && !cur.isIndirect()) {
WorkData& wd = workData[RegUtils::groupOf(cur.regType())];
uint32_t regId = cur.regId();
if (cur.is_reg() && !cur.is_indirect()) {
WorkData& wd = work_data[RegUtils::group_of(cur.reg_type())];
uint32_t reg_id = cur.reg_id();
reg.setSignatureAndId(RegUtils::signatureOf(cur.regType()), regId);
wd.unassign(varId, regId);
reg.set_signature_and_id(RegUtils::signature_of(cur.reg_type()), reg_id);
wd.unassign(var_id, reg_id);
}
else {
// Stack to reg move - tricky, because when we move from stack to stack we can decide which register to use. In
// general we follow the rule that IntToInt moves will use GP regs with the possibility to sign- or zero-extend,
// and all other moves will either use GP or VEC regs depending on the size of the move.
OperandSignature signature = getSuitableRegForMemToMemMove(arch, out.typeId(), cur.typeId());
if (ASMJIT_UNLIKELY(!signature.isValid())) {
return DebugUtils::errored(kErrorInvalidState);
OperandSignature signature = get_suitable_reg_for_mem_to_mem_move(arch, out.type_id(), cur.type_id());
if (ASMJIT_UNLIKELY(!signature.is_valid())) {
return make_error(Error::kInvalidState);
}
WorkData& wd = workData[signature.regGroup()];
RegMask availableRegs = wd.availableRegs();
if (ASMJIT_UNLIKELY(!availableRegs)) {
return DebugUtils::errored(kErrorInvalidState);
WorkData& wd = work_data[signature.reg_group()];
RegMask available_regs = wd.available_regs();
if (ASMJIT_UNLIKELY(!available_regs)) {
return make_error(Error::kInvalidState);
}
uint32_t availableId = Support::ctz(availableRegs);
reg.setSignatureAndId(signature, availableId);
uint32_t available_id = Support::ctz(available_regs);
reg.set_signature_and_id(signature, available_id);
ASMJIT_PROPAGATE(emitArgMove(reg, out.typeId(), srcStackPtr, cur.typeId()));
ASMJIT_PROPAGATE(emit_arg_move(reg, out.type_id(), src_stack_ptr, cur.type_id()));
}
if (cur.isIndirect() && cur.isReg()) {
workData[RegGroup::kGp].unassign(varId, cur.regId());
if (cur.is_indirect() && cur.is_reg()) {
work_data[RegGroup::kGp].unassign(var_id, cur.reg_id());
}
// Register to stack move.
ASMJIT_PROPAGATE(emitRegMove(dstStackPtr, reg, cur.typeId()));
var.markDone();
ASMJIT_PROPAGATE(emit_reg_move(dst_stack_ptr, reg, cur.type_id()));
var.mark_done();
}
}
// Shuffle all registers that are currently assigned according to the target assignment.
uint32_t workFlags = kWorkNone;
uint32_t work_flags = kWorkNone;
for (;;) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone() || !var.cur.isReg()) {
for (uint32_t var_id = 0; var_id < var_count; var_id++) {
Var& var = ctx._vars[var_id];
if (var.is_done() || !var.cur.is_reg()) {
continue;
}
FuncValue& cur = var.cur;
FuncValue& out = var.out;
RegGroup curGroup = RegUtils::groupOf(cur.regType());
RegGroup outGroup = RegUtils::groupOf(out.regType());
RegGroup cur_group = RegUtils::group_of(cur.reg_type());
RegGroup out_group = RegUtils::group_of(out.reg_type());
uint32_t curId = cur.regId();
uint32_t outId = out.regId();
uint32_t cur_id = cur.reg_id();
uint32_t out_id = out.reg_id();
if (curGroup != outGroup) {
if (cur_group != out_group) {
// TODO: Conversion is not supported.
return DebugUtils::errored(kErrorInvalidAssignment);
return make_error(Error::kInvalidAssignment);
}
else {
WorkData& wd = workData[outGroup];
if (!wd.isAssigned(outId) || curId == outId) {
WorkData& wd = work_data[out_group];
if (!wd.is_assigned(out_id) || cur_id == out_id) {
EmitMove:
ASMJIT_PROPAGATE(
emitArgMove(
Reg(RegUtils::signatureOf(out.regType()), outId), out.typeId(),
Reg(RegUtils::signatureOf(cur.regType()), curId), cur.typeId()));
emit_arg_move(
Reg(RegUtils::signature_of(out.reg_type()), out_id), out.type_id(),
Reg(RegUtils::signature_of(cur.reg_type()), cur_id), cur.type_id()));
// Only reassign if this is not a sign/zero extension that happens on the same in/out register.
if (curId != outId) {
wd.reassign(varId, outId, curId);
if (cur_id != out_id) {
wd.reassign(var_id, out_id, cur_id);
}
cur.initReg(out.regType(), outId, out.typeId());
cur.init_reg(out.reg_type(), out_id, out.type_id());
if (outId == out.regId()) {
var.markDone();
if (out_id == out.reg_id()) {
var.mark_done();
}
workFlags |= kWorkDidSome | kWorkPending;
work_flags |= kWorkDidSome | kWorkPending;
}
else {
uint32_t altId = wd._physToVarId[outId];
Var& altVar = ctx._vars[altId];
uint32_t alt_id = wd._phys_to_var_id[out_id];
Var& alt_var = ctx._vars[alt_id];
if (!altVar.out.isInitialized() || (altVar.out.isReg() && altVar.out.regId() == curId)) {
if (!alt_var.out.is_initialized() || (alt_var.out.is_reg() && alt_var.out.reg_id() == cur_id)) {
// Only a few architectures provide swap operations, and only for a few register groups.
if (archTraits.hasInstRegSwap(curGroup)) {
RegType highestType = Support::max(cur.regType(), altVar.cur.regType());
if (Support::isBetween(highestType, RegType::kGp8Lo, RegType::kGp16)) {
highestType = RegType::kGp32;
if (arch_traits.has_inst_reg_swap(cur_group)) {
RegType highest_type = Support::max(cur.reg_type(), alt_var.cur.reg_type());
if (Support::is_between(highest_type, RegType::kGp8Lo, RegType::kGp16)) {
highest_type = RegType::kGp32;
}
OperandSignature signature = RegUtils::signatureOf(highestType);
ASMJIT_PROPAGATE(emitRegSwap(Reg(signature, outId), Reg(signature, curId)));
OperandSignature signature = RegUtils::signature_of(highest_type);
ASMJIT_PROPAGATE(emit_reg_swap(Reg(signature, out_id), Reg(signature, cur_id)));
wd.swap(varId, curId, altId, outId);
cur.setRegId(outId);
var.markDone();
altVar.cur.setRegId(curId);
wd.swap(var_id, cur_id, alt_id, out_id);
cur.set_reg_id(out_id);
var.mark_done();
alt_var.cur.set_reg_id(cur_id);
if (altVar.out.isInitialized()) {
altVar.markDone();
if (alt_var.out.is_initialized()) {
alt_var.mark_done();
}
workFlags |= kWorkDidSome;
work_flags |= kWorkDidSome;
}
else {
// If there is a scratch register it can be used to perform the swap.
RegMask availableRegs = wd.availableRegs();
if (availableRegs) {
RegMask inOutRegs = wd.dstRegs();
if (availableRegs & ~inOutRegs) {
availableRegs &= ~inOutRegs;
RegMask available_regs = wd.available_regs();
if (available_regs) {
RegMask in_out_regs = wd.dst_regs();
if (available_regs & ~in_out_regs) {
available_regs &= ~in_out_regs;
}
outId = Support::ctz(availableRegs);
out_id = Support::ctz(available_regs);
goto EmitMove;
}
else {
workFlags |= kWorkPending;
work_flags |= kWorkPending;
}
}
}
else {
workFlags |= kWorkPending;
work_flags |= kWorkPending;
}
}
}
}
if (!(workFlags & kWorkPending)) {
if (!(work_flags & kWorkPending)) {
break;
}
// If we did nothing twice it means that something is really broken.
if ((workFlags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) {
return DebugUtils::errored(kErrorInvalidState);
if ((work_flags & (kWorkDidSome | kWorkPostponed)) == kWorkPostponed) {
return make_error(Error::kInvalidState);
}
workFlags = (workFlags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
work_flags = (work_flags & kWorkDidSome) ? kWorkNone : kWorkPostponed;
}
// Load arguments passed by stack into registers. This is pretty simple and
// it never requires multiple iterations like the previous phase.
if (ctx._hasStackSrc) {
uint32_t iterCount = 1;
if (frame.hasDynamicAlignment() && !frame.hasPreservedFP()) {
sa.setId(saVarId < varCount ? ctx._vars[saVarId].cur.regId() : frame.saRegId());
if (ctx._has_stack_src) {
uint32_t iter_count = 1;
if (frame.has_dynamic_alignment() && !frame.has_preserved_fp()) {
sa.set_id(sa_var_id < var_count ? ctx._vars[sa_var_id].cur.reg_id() : frame.sa_reg_id());
}
// Base address of all arguments passed by stack.
BaseMem baseArgPtr(sa, int32_t(frame.saOffset(sa.id())));
BaseMem base_arg_ptr(sa, int32_t(frame.sa_offset(sa.id())));
for (uint32_t iter = 0; iter < iterCount; iter++) {
for (uint32_t varId = 0; varId < varCount; varId++) {
Var& var = ctx._vars[varId];
if (var.isDone()) {
for (uint32_t iter = 0; iter < iter_count; iter++) {
for (uint32_t var_id = 0; var_id < var_count; var_id++) {
Var& var = ctx._vars[var_id];
if (var.is_done()) {
continue;
}
if (var.cur.isStack()) {
ASMJIT_ASSERT(var.out.isReg());
if (var.cur.is_stack()) {
ASMJIT_ASSERT(var.out.is_reg());
uint32_t outId = var.out.regId();
RegType outType = var.out.regType();
uint32_t out_id = var.out.reg_id();
RegType out_type = var.out.reg_type();
RegGroup group = RegUtils::groupOf(outType);
WorkData& wd = workData[group];
RegGroup group = RegUtils::group_of(out_type);
WorkData& wd = work_data[group];
if (outId == sa.id() && group == RegGroup::kGp) {
// This register will be processed last as we still need `saRegId`.
if (iterCount == 1) {
iterCount++;
if (out_id == sa.id() && group == RegGroup::kGp) {
// This register will be processed last as we still need `sa_reg_id`.
if (iter_count == 1) {
iter_count++;
continue;
}
wd.unassign(wd._physToVarId[outId], outId);
wd.unassign(wd._phys_to_var_id[out_id], out_id);
}
Reg dstReg = Reg(RegUtils::signatureOf(outType), outId);
BaseMem srcMem = baseArgPtr.cloneAdjusted(var.cur.stackOffset());
Reg dst_reg = Reg(RegUtils::signature_of(out_type), out_id);
BaseMem src_mem = base_arg_ptr.clone_adjusted(var.cur.stack_offset());
ASMJIT_PROPAGATE(emitArgMove(
dstReg, var.out.typeId(),
srcMem, var.cur.typeId()));
ASMJIT_PROPAGATE(emit_arg_move(
dst_reg, var.out.type_id(),
src_mem, var.cur.type_id()));
wd.assign(varId, outId);
var.cur.initReg(outType, outId, var.cur.typeId(), FuncValue::kFlagIsDone);
wd.assign(var_id, out_id);
var.cur.init_reg(out_type, out_id, var.cur.type_id(), FuncValue::kFlagIsDone);
}
}
}
}
return kErrorOk;
return Error::kOk;
}
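
The shuffling loop above boils down to "move when the destination register is free, swap to break cycles". A hedged, standalone model of that strategy on plain integers (it assumes each variable targets a distinct physical register; this is not AsmJit code):

#include <cstdio>
#include <utility>
#include <vector>

// cur[i] is the register currently holding variable i, out[i] is its target.
void shuffle(std::vector<int>& cur, const std::vector<int>& out) {
  bool pending = true;
  while (pending) {
    pending = false;
    for (size_t i = 0; i < cur.size(); i++) {
      if (cur[i] == out[i]) {
        continue;                            // already in place
      }
      size_t holder = i;
      for (size_t j = 0; j < cur.size(); j++) {
        if (j != i && cur[j] == out[i]) {
          holder = j;                        // target register is occupied
        }
      }
      if (holder == i) {
        std::printf("mov r%d <- r%d\n", out[i], cur[i]);
        cur[i] = out[i];                     // destination free: plain move
      }
      else {
        std::printf("swap r%d, r%d\n", cur[i], cur[holder]);
        std::swap(cur[i], cur[holder]);      // cycle: swap settles variable i
      }
      pending = true;                        // re-scan until everything settles
    }
  }
}

Every move or swap places at least one variable at its final register, so the loop terminates after at most one operation per variable.
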
ASMJIT_END_NAMESPACE


@@ -18,9 +18,10 @@ ASMJIT_BEGIN_NAMESPACE
//! Helper class that provides utilities for each supported architecture.
class BaseEmitHelper {
public:
protected:
BaseEmitter* _emitter;
public:
ASMJIT_INLINE_NODEBUG explicit BaseEmitHelper(BaseEmitter* emitter = nullptr) noexcept
: _emitter(emitter) {}
@@ -29,16 +30,14 @@ public:
[[nodiscard]]
ASMJIT_INLINE_NODEBUG BaseEmitter* emitter() const noexcept { return _emitter; }
ASMJIT_INLINE_NODEBUG void setEmitter(BaseEmitter* emitter) noexcept { _emitter = emitter; }
//! Emits a pure move operation between two registers of the same type or between a register and its home
//! slot. This function does not handle register conversion.
virtual Error emitRegMove(
virtual Error emit_reg_move(
const Operand_& dst_,
const Operand_& src_, TypeId typeId, const char* comment = nullptr);
const Operand_& src_, TypeId type_id, const char* comment = nullptr);
//! Emits swap between two registers.
virtual Error emitRegSwap(
virtual Error emit_reg_swap(
const Reg& a,
const Reg& b, const char* comment = nullptr);
@@ -47,11 +46,11 @@ public:
//! This function can handle the necessary conversion from one argument to another, and from one register type
//! to another, if it's possible. Any attempt at a conversion that requires a third register of a different group
//! (for example conversion from K to MMX on X86/X64) will fail.
virtual Error emitArgMove(
const Reg& dst_, TypeId dstTypeId,
const Operand_& src_, TypeId srcTypeId, const char* comment = nullptr);
virtual Error emit_arg_move(
const Reg& dst_, TypeId dst_type_id,
const Operand_& src_, TypeId src_type_id, const char* comment = nullptr);
Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
Error emit_args_assignment(const FuncFrame& frame, const FuncArgsAssignment& args);
};
//! \}
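As a quick illustration of the conversion contract above: a caller can widen a 32-bit argument into a 64-bit register through the virtual emit_arg_move(). This is a minimal sketch, assuming an already-constructed helper and suitable registers; widen_arg() itself is hypothetical:

    // Minimal sketch: emit_arg_move() performs the int32 -> int64 widening.
    // It fails only if the conversion would need a third register of a
    // different group. Everything except emit_arg_move() is illustrative.
    static Error widen_arg(BaseEmitHelper* helper, const Reg& dst_gp64, const Reg& src_gp32) {
      return helper->emit_arg_move(dst_gp64, TypeId::kInt64,
                                   src_gp32, TypeId::kInt32);
    }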


@@ -14,12 +14,12 @@ ASMJIT_BEGIN_NAMESPACE
// BaseEmitter - Construction & Destruction
// ========================================
BaseEmitter::BaseEmitter(EmitterType emitterType) noexcept
: _emitterType(emitterType) {}
BaseEmitter::BaseEmitter(EmitterType emitter_type) noexcept
: _emitter_type(emitter_type) {}
BaseEmitter::~BaseEmitter() noexcept {
if (_code) {
_addEmitterFlags(EmitterFlags::kDestroyed);
_add_emitter_flags(EmitterFlags::kDestroyed);
_code->detach(this);
}
}
@@ -29,7 +29,7 @@ BaseEmitter::~BaseEmitter() noexcept {
Error BaseEmitter::finalize() {
// Does nothing by default, overridden by `BaseBuilder` and `BaseCompiler`.
return kErrorOk;
return Error::kOk;
}
// BaseEmitter - Internals
@@ -38,98 +38,98 @@ Error BaseEmitter::finalize() {
static constexpr EmitterFlags kEmitterPreservedFlags = EmitterFlags::kOwnLogger | EmitterFlags::kOwnErrorHandler;
static ASMJIT_NOINLINE void BaseEmitter_updateForcedOptions(BaseEmitter* self) noexcept {
bool emitComments = false;
bool hasDiagnosticOptions = false;
bool emit_comments = false;
bool has_diagnostic_options = false;
if (self->emitterType() == EmitterType::kAssembler) {
if (self->emitter_type() == EmitterType::kAssembler) {
// Assembler: Don't emit comments if logger is not attached.
emitComments = self->_code != nullptr && self->_logger != nullptr;
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateAssembler);
emit_comments = self->_code != nullptr && self->_logger != nullptr;
has_diagnostic_options = self->has_diagnostic_option(DiagnosticOptions::kValidateAssembler);
}
else {
// Builder/Compiler: Always emit comments, we cannot assume they won't be used.
emitComments = self->_code != nullptr;
hasDiagnosticOptions = self->hasDiagnosticOption(DiagnosticOptions::kValidateIntermediate);
emit_comments = self->_code != nullptr;
has_diagnostic_options = self->has_diagnostic_option(DiagnosticOptions::kValidateIntermediate);
}
if (emitComments) {
self->_addEmitterFlags(EmitterFlags::kLogComments);
if (emit_comments) {
self->_add_emitter_flags(EmitterFlags::kLogComments);
}
else {
self->_clearEmitterFlags(EmitterFlags::kLogComments);
self->_clear_emitter_flags(EmitterFlags::kLogComments);
}
// The reserved option tells the emitter (Assembler/Builder/Compiler) that there may be either a border
// case (CodeHolder not attached, for example) or that logging or validation is required.
if (self->_code == nullptr || self->_logger || hasDiagnosticOptions) {
self->_forcedInstOptions |= InstOptions::kReserved;
if (self->_code == nullptr || self->_logger || has_diagnostic_options) {
self->_forced_inst_options |= InstOptions::kReserved;
}
else {
self->_forcedInstOptions &= ~InstOptions::kReserved;
self->_forced_inst_options &= ~InstOptions::kReserved;
}
}
// BaseEmitter - Diagnostic Options
// ================================
void BaseEmitter::addDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions |= options;
void BaseEmitter::add_diagnostic_options(DiagnosticOptions options) noexcept {
_diagnostic_options |= options;
BaseEmitter_updateForcedOptions(this);
}
void BaseEmitter::clearDiagnosticOptions(DiagnosticOptions options) noexcept {
_diagnosticOptions &= ~options;
void BaseEmitter::clear_diagnostic_options(DiagnosticOptions options) noexcept {
_diagnostic_options &= ~options;
BaseEmitter_updateForcedOptions(this);
}
// BaseEmitter - Logging
// =====================
void BaseEmitter::setLogger(Logger* logger) noexcept {
void BaseEmitter::set_logger(Logger* logger) noexcept {
#ifndef ASMJIT_NO_LOGGING
if (logger) {
_logger = logger;
_addEmitterFlags(EmitterFlags::kOwnLogger);
_add_emitter_flags(EmitterFlags::kOwnLogger);
}
else {
_logger = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnLogger);
_clear_emitter_flags(EmitterFlags::kOwnLogger);
if (_code) {
_logger = _code->logger();
}
}
BaseEmitter_updateForcedOptions(this);
#else
DebugUtils::unused(logger);
Support::maybe_unused(logger);
#endif
}
// BaseEmitter - Error Handling
// ============================
void BaseEmitter::setErrorHandler(ErrorHandler* errorHandler) noexcept {
if (errorHandler) {
_errorHandler = errorHandler;
_addEmitterFlags(EmitterFlags::kOwnErrorHandler);
void BaseEmitter::set_error_handler(ErrorHandler* error_handler) noexcept {
if (error_handler) {
_error_handler = error_handler;
_add_emitter_flags(EmitterFlags::kOwnErrorHandler);
}
else {
_errorHandler = nullptr;
_clearEmitterFlags(EmitterFlags::kOwnErrorHandler);
_error_handler = nullptr;
_clear_emitter_flags(EmitterFlags::kOwnErrorHandler);
if (_code) {
_errorHandler = _code->errorHandler();
_error_handler = _code->error_handler();
}
}
}
Error BaseEmitter::_reportError(Error err, const char* message) {
ASMJIT_ASSERT(err != kErrorOk);
Error BaseEmitter::_report_error(Error err, const char* message) {
ASMJIT_ASSERT(err != Error::kOk);
ErrorHandler* eh = _errorHandler;
ErrorHandler* eh = _error_handler;
if (eh) {
if (!message) {
message = DebugUtils::errorAsString(err);
message = DebugUtils::error_as_string(err);
}
eh->handleError(err, message, this);
eh->handle_error(err, message, this);
}
return err;
@@ -140,181 +140,181 @@ Error BaseEmitter::_reportError(Error err, const char* message) {
// [[pure virtual]]
Error BaseEmitter::section(Section* section) {
DebugUtils::unused(section);
return DebugUtils::errored(kErrorInvalidState);
Support::maybe_unused(section);
return make_error(Error::kInvalidState);
}
// BaseEmitter - Labels
// ====================
// [[pure virtual]]
Label BaseEmitter::newLabel() {
Label BaseEmitter::new_label() {
return Label(Globals::kInvalidId);
}
// [[pure virtual]]
Label BaseEmitter::newNamedLabel(const char* name, size_t nameSize, LabelType type, uint32_t parentId) {
DebugUtils::unused(name, nameSize, type, parentId);
Label BaseEmitter::new_named_label(const char* name, size_t name_size, LabelType type, uint32_t parent_id) {
Support::maybe_unused(name, name_size, type, parent_id);
return Label(Globals::kInvalidId);
}
Label BaseEmitter::labelByName(const char* name, size_t nameSize, uint32_t parentId) noexcept {
return Label(_code ? _code->labelIdByName(name, nameSize, parentId) : Globals::kInvalidId);
Label BaseEmitter::label_by_name(const char* name, size_t name_size, uint32_t parent_id) noexcept {
return Label(_code ? _code->label_id_by_name(name, name_size, parent_id) : Globals::kInvalidId);
}
// [[pure virtual]]
Error BaseEmitter::bind(const Label& label) {
DebugUtils::unused(label);
return DebugUtils::errored(kErrorInvalidState);
Support::maybe_unused(label);
return make_error(Error::kInvalidState);
}
bool BaseEmitter::isLabelValid(uint32_t labelId) const noexcept {
return _code && labelId < _code->labelCount();
bool BaseEmitter::is_label_valid(uint32_t label_id) const noexcept {
return _code && label_id < _code->label_count();
}
// BaseEmitter - Emit (Low-Level)
// ==============================
using EmitterUtils::noExt;
using EmitterUtils::no_ext;
Error BaseEmitter::_emitI(InstId instId) {
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
Error BaseEmitter::_emitI(InstId inst_id) {
return _emit(inst_id, no_ext[0], no_ext[1], no_ext[2], no_ext);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0) {
return _emit(instId, o0, noExt[1], noExt[2], noExt);
Error BaseEmitter::_emitI(InstId inst_id, const Operand_& o0) {
return _emit(inst_id, o0, no_ext[1], no_ext[2], no_ext);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1) {
return _emit(instId, o0, o1, noExt[2], noExt);
Error BaseEmitter::_emitI(InstId inst_id, const Operand_& o0, const Operand_& o1) {
return _emit(inst_id, o0, o1, no_ext[2], no_ext);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
return _emit(instId, o0, o1, o2, noExt);
Error BaseEmitter::_emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2) {
return _emit(inst_id, o0, o1, o2, no_ext);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Operand_ opExt[3] = { o3 };
return _emit(instId, o0, o1, o2, opExt);
Error BaseEmitter::_emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3) {
Operand_ op_ext[3] = { o3 };
return _emit(inst_id, o0, o1, o2, op_ext);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Operand_ opExt[3] = { o3, o4 };
return _emit(instId, o0, o1, o2, opExt);
Error BaseEmitter::_emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4) {
Operand_ op_ext[3] = { o3, o4 };
return _emit(inst_id, o0, o1, o2, op_ext);
}
Error BaseEmitter::_emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Operand_ opExt[3] = { o3, o4, o5 };
return _emit(instId, o0, o1, o2, opExt);
Error BaseEmitter::_emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5) {
Operand_ op_ext[3] = { o3, o4, o5 };
return _emit(inst_id, o0, o1, o2, op_ext);
}
// [[pure virtual]]
Error BaseEmitter::_emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt) {
DebugUtils::unused(instId, o0, o1, o2, oExt);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitter::_emit(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext) {
Support::maybe_unused(inst_id, o0, o1, o2, op_ext);
return make_error(Error::kInvalidState);
}
Error BaseEmitter::_emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
Error BaseEmitter::_emit_op_array(InstId inst_id, const Operand_* operands, size_t op_count) {
const Operand_* op = operands;
Operand_ opExt[3];
Operand_ op_ext[3];
switch (opCount) {
switch (op_count) {
case 0:
return _emit(instId, noExt[0], noExt[1], noExt[2], noExt);
return _emit(inst_id, no_ext[0], no_ext[1], no_ext[2], no_ext);
case 1:
return _emit(instId, op[0], noExt[1], noExt[2], noExt);
return _emit(inst_id, op[0], no_ext[1], no_ext[2], no_ext);
case 2:
return _emit(instId, op[0], op[1], noExt[2], noExt);
return _emit(inst_id, op[0], op[1], no_ext[2], no_ext);
case 3:
return _emit(instId, op[0], op[1], op[2], noExt);
return _emit(inst_id, op[0], op[1], op[2], no_ext);
case 4:
opExt[0] = op[3];
opExt[1].reset();
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
op_ext[0] = op[3];
op_ext[1].reset();
op_ext[2].reset();
return _emit(inst_id, op[0], op[1], op[2], op_ext);
case 5:
opExt[0] = op[3];
opExt[1] = op[4];
opExt[2].reset();
return _emit(instId, op[0], op[1], op[2], opExt);
op_ext[0] = op[3];
op_ext[1] = op[4];
op_ext[2].reset();
return _emit(inst_id, op[0], op[1], op[2], op_ext);
case 6:
return _emit(instId, op[0], op[1], op[2], op + 3);
return _emit(inst_id, op[0], op[1], op[2], op + 3);
default:
return DebugUtils::errored(kErrorInvalidArgument);
return make_error(Error::kInvalidArgument);
}
}
// BaseEmitter - Emit Utilities
// ============================
Error BaseEmitter::emitProlog(const FuncFrame& frame) {
Error BaseEmitter::emit_prolog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return make_error(Error::kNotInitialized);
return _funcs.emitProlog(this, frame);
return _funcs.emit_prolog(this, frame);
}
Error BaseEmitter::emitEpilog(const FuncFrame& frame) {
Error BaseEmitter::emit_epilog(const FuncFrame& frame) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return make_error(Error::kNotInitialized);
return _funcs.emitEpilog(this, frame);
return _funcs.emit_epilog(this, frame);
}
Error BaseEmitter::emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
Error BaseEmitter::emit_args_assignment(const FuncFrame& frame, const FuncArgsAssignment& args) {
if (ASMJIT_UNLIKELY(!_code))
return DebugUtils::errored(kErrorNotInitialized);
return make_error(Error::kNotInitialized);
return _funcs.emitArgsAssignment(this, frame, args);
return _funcs.emit_args_assignment(this, frame, args);
}
// BaseEmitter - Align
// ===================
// [[pure virtual]]
Error BaseEmitter::align(AlignMode alignMode, uint32_t alignment) {
DebugUtils::unused(alignMode, alignment);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitter::align(AlignMode align_mode, uint32_t alignment) {
Support::maybe_unused(align_mode, alignment);
return make_error(Error::kInvalidState);
}
// BaseEmitter - Embed
// ===================
// [[pure virtual]]
Error BaseEmitter::embed(const void* data, size_t dataSize) {
DebugUtils::unused(data, dataSize);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitter::embed(const void* data, size_t data_size) {
Support::maybe_unused(data, data_size);
return make_error(Error::kInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount) {
DebugUtils::unused(typeId, data, itemCount, repeatCount);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitter::embed_data_array(TypeId type_id, const void* data, size_t item_count, size_t repeat_count) {
Support::maybe_unused(type_id, data, item_count, repeat_count);
return make_error(Error::kInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedConstPool(const Label& label, const ConstPool& pool) {
DebugUtils::unused(label, pool);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitter::embed_const_pool(const Label& label, const ConstPool& pool) {
Support::maybe_unused(label, pool);
return make_error(Error::kInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedLabel(const Label& label, size_t dataSize) {
DebugUtils::unused(label, dataSize);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitter::embed_label(const Label& label, size_t data_size) {
Support::maybe_unused(label, data_size);
return make_error(Error::kInvalidState);
}
// [[pure virtual]]
Error BaseEmitter::embedLabelDelta(const Label& label, const Label& base, size_t dataSize) {
DebugUtils::unused(label, base, dataSize);
return DebugUtils::errored(kErrorInvalidState);
Error BaseEmitter::embed_label_delta(const Label& label, const Label& base, size_t data_size) {
Support::maybe_unused(label, base, data_size);
return make_error(Error::kInvalidState);
}
// BaseEmitter - Comment
@@ -322,16 +322,16 @@ Error BaseEmitter::embedLabelDelta(const Label& label, const Label& base, size_t
// [[pure virtual]]
Error BaseEmitter::comment(const char* data, size_t size) {
DebugUtils::unused(data, size);
return DebugUtils::errored(kErrorInvalidState);
Support::maybe_unused(data, size);
return make_error(Error::kInvalidState);
}
Error BaseEmitter::commentf(const char* fmt, ...) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (!has_emitter_flag(EmitterFlags::kLogComments)) {
if (!has_emitter_flag(EmitterFlags::kAttached)) {
return report_error(make_error(Error::kNotInitialized));
}
return kErrorOk;
return Error::kOk;
}
#ifndef ASMJIT_NO_LOGGING
@@ -339,102 +339,102 @@ Error BaseEmitter::commentf(const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
Error err = sb.appendVFormat(fmt, ap);
Error err = sb.append_vformat(fmt, ap);
va_end(ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt);
return kErrorOk;
Support::maybe_unused(fmt);
return Error::kOk;
#endif
}
Error BaseEmitter::commentv(const char* fmt, va_list ap) {
if (!hasEmitterFlag(EmitterFlags::kLogComments)) {
if (!hasEmitterFlag(EmitterFlags::kAttached)) {
return reportError(DebugUtils::errored(kErrorNotInitialized));
if (!has_emitter_flag(EmitterFlags::kLogComments)) {
if (!has_emitter_flag(EmitterFlags::kAttached)) {
return report_error(make_error(Error::kNotInitialized));
}
return kErrorOk;
return Error::kOk;
}
#ifndef ASMJIT_NO_LOGGING
StringTmp<1024> sb;
Error err = sb.appendVFormat(fmt, ap);
Error err = sb.append_vformat(fmt, ap);
ASMJIT_PROPAGATE(err);
return comment(sb.data(), sb.size());
#else
DebugUtils::unused(fmt, ap);
return kErrorOk;
Support::maybe_unused(fmt, ap);
return Error::kOk;
#endif
}
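Both formatted variants are cheap no-ops unless comment logging is active. A minimal usage sketch, assuming an emitter `a` attached to a CodeHolder that has a logger (`spill_count` is illustrative):

    // Formatted and stored only when EmitterFlags::kLogComments is set,
    // i.e. when a logger is attached; otherwise returns Error::kOk early.
    a.commentf("spill slot count = %u", spill_count);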
// BaseEmitter - Events
// ====================
Error BaseEmitter::onAttach(CodeHolder& code) noexcept {
Error BaseEmitter::on_attach(CodeHolder& code) noexcept {
_code = &code;
_environment = code.environment();
_addEmitterFlags(EmitterFlags::kAttached);
_add_emitter_flags(EmitterFlags::kAttached);
_gpSignature.setBits(
Environment::is32Bit(code.arch())
_gp_signature.set_bits(
Environment::is_32bit(code.arch())
? RegTraits<RegType::kGp32>::kSignature
: RegTraits<RegType::kGp64>::kSignature
);
onSettingsUpdated();
return kErrorOk;
on_settings_updated();
return Error::kOk;
}
Error BaseEmitter::onDetach(CodeHolder& code) noexcept {
DebugUtils::unused(code);
Error BaseEmitter::on_detach(CodeHolder& code) noexcept {
Support::maybe_unused(code);
if (!hasOwnLogger()) {
if (!has_own_logger()) {
_logger = nullptr;
}
if (!hasOwnErrorHandler()) {
_errorHandler = nullptr;
if (!has_own_error_handler()) {
_error_handler = nullptr;
}
_clearEmitterFlags(~kEmitterPreservedFlags);
_instructionAlignment = uint8_t(0);
_forcedInstOptions = InstOptions::kReserved;
_privateData = 0;
_clear_emitter_flags(~kEmitterPreservedFlags);
_instruction_alignment = uint8_t(0);
_forced_inst_options = InstOptions::kReserved;
_private_data = 0;
_environment.reset();
_gpSignature.reset();
_gp_signature.reset();
_instOptions = InstOptions::kNone;
_extraReg.reset();
_inlineComment = nullptr;
_inst_options = InstOptions::kNone;
_extra_reg.reset();
_inline_comment = nullptr;
return kErrorOk;
return Error::kOk;
}
Error BaseEmitter::onReinit(CodeHolder& code) noexcept {
Error BaseEmitter::on_reinit(CodeHolder& code) noexcept {
ASMJIT_ASSERT(_code == &code);
DebugUtils::unused(code);
Support::maybe_unused(code);
_instOptions = InstOptions::kNone;
_extraReg.reset();
_inlineComment = nullptr;
_inst_options = InstOptions::kNone;
_extra_reg.reset();
_inline_comment = nullptr;
return kErrorOk;
return Error::kOk;
}
void BaseEmitter::onSettingsUpdated() noexcept {
void BaseEmitter::on_settings_updated() noexcept {
// Only called when attached to CodeHolder by CodeHolder.
ASMJIT_ASSERT(_code != nullptr);
if (!hasOwnLogger()) {
if (!has_own_logger()) {
_logger = _code->logger();
}
if (!hasOwnErrorHandler()) {
_errorHandler = _code->errorHandler();
if (!has_own_error_handler()) {
_error_handler = _code->error_handler();
}
BaseEmitter_updateForcedOptions(this);


@@ -202,7 +202,7 @@ public:
//! Emitter state that can be used to specify options and inline comment of a next node or instruction.
struct State {
InstOptions options;
RegOnly extraReg;
RegOnly extra_reg;
const char* comment;
};
@@ -216,21 +216,21 @@ public:
using FormatInstruction = Error (ASMJIT_CDECL*)(
String& sb,
FormatFlags formatFlags,
FormatFlags format_flags,
const BaseEmitter* emitter,
Arch arch,
const BaseInst& inst, const Operand_* operands, size_t opCount) noexcept;
const BaseInst& inst, Span<const Operand_> operands) noexcept;
using ValidateFunc = Error (ASMJIT_CDECL*)(const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept;
using ValidateFunc = Error (ASMJIT_CDECL*)(const BaseInst& inst, const Operand_* operands, size_t op_count, ValidationFlags validation_flags) noexcept;
//! Emit prolog implementation.
EmitProlog emitProlog;
EmitProlog emit_prolog;
//! Emit epilog implementation.
EmitEpilog emitEpilog;
EmitEpilog emit_epilog;
//! Emit arguments assignment implementation.
EmitArgsAssignment emitArgsAssignment;
EmitArgsAssignment emit_args_assignment;
//! Instruction formatter implementation.
FormatInstruction formatInstruction;
FormatInstruction format_instruction;
//! Instruction validation implementation.
ValidateFunc validate;
@@ -244,31 +244,31 @@ public:
//! \{
//! See \ref EmitterType.
EmitterType _emitterType = EmitterType::kNone;
EmitterType _emitter_type = EmitterType::kNone;
//! See \ref EmitterFlags.
EmitterFlags _emitterFlags = EmitterFlags::kNone;
EmitterFlags _emitter_flags = EmitterFlags::kNone;
//! Instruction alignment.
uint8_t _instructionAlignment = 0u;
uint8_t _instruction_alignment = 0u;
//! Validation flags in case validation is used.
//!
//! \note Validation flags are specific to the emitter and they are setup at construction time and then never
//! changed.
ValidationFlags _validationFlags = ValidationFlags::kNone;
ValidationFlags _validation_flags = ValidationFlags::kNone;
//! Validation options.
DiagnosticOptions _diagnosticOptions = DiagnosticOptions::kNone;
DiagnosticOptions _diagnostic_options = DiagnosticOptions::kNone;
//! Encoding options.
EncodingOptions _encodingOptions = EncodingOptions::kNone;
EncodingOptions _encoding_options = EncodingOptions::kNone;
//! Forced instruction options, combined with \ref _instOptions by \ref emit().
InstOptions _forcedInstOptions = InstOptions::kReserved;
//! Forced instruction options, combined with \ref _inst_options by \ref emit().
InstOptions _forced_inst_options = InstOptions::kReserved;
//! All supported architectures in a bit-mask, where LSB is the bit with a zero index.
uint64_t _archMask = 0;
uint64_t _arch_mask = 0;
//! CodeHolder the emitter is attached to.
CodeHolder* _code = nullptr;
@@ -277,37 +277,37 @@ public:
Logger* _logger = nullptr;
//! Attached \ref ErrorHandler.
ErrorHandler* _errorHandler = nullptr;
ErrorHandler* _error_handler = nullptr;
//! Describes the target environment, matches \ref CodeHolder::environment().
Environment _environment {};
//! Native GP register signature (either a 32-bit or 64-bit GP register signature).
OperandSignature _gpSignature {};
OperandSignature _gp_signature {};
//! Internal private data used freely by any emitter.
uint32_t _privateData = 0;
uint32_t _private_data = 0;
//! Next instruction options (affects the next instruction).
InstOptions _instOptions = InstOptions::kNone;
InstOptions _inst_options = InstOptions::kNone;
//! Extra register (op-mask {k} on AVX-512) (affects the next instruction).
RegOnly _extraReg {};
RegOnly _extra_reg {};
//! Inline comment of the next instruction (affects the next instruction).
const char* _inlineComment = nullptr;
const char* _inline_comment = nullptr;
//! Pointer to functions used by backend-specific emitter implementation.
Funcs _funcs {};
//! Emitter attached before this emitter in \ref CodeHolder, otherwise nullptr if there is no emitter before.
BaseEmitter* _attachedPrev = nullptr;
BaseEmitter* _attached_prev = nullptr;
//! Emitter attached after this emitter in \ref CodeHolder, otherwise nullptr if there is no emitter after.
BaseEmitter* _attachedNext = nullptr;
BaseEmitter* _attached_next = nullptr;
//! \}
//! \name Construction & Destruction
//! \{
ASMJIT_API explicit BaseEmitter(EmitterType emitterType) noexcept;
ASMJIT_API explicit BaseEmitter(EmitterType emitter_type) noexcept;
ASMJIT_API virtual ~BaseEmitter() noexcept;
//! \}
@@ -330,37 +330,37 @@ public:
//! Returns the type of this emitter, see `EmitterType`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterType emitterType() const noexcept { return _emitterType; }
ASMJIT_INLINE_NODEBUG EmitterType emitter_type() const noexcept { return _emitter_type; }
//! Returns emitter flags, see `EmitterFlags`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EmitterFlags emitterFlags() const noexcept { return _emitterFlags; }
ASMJIT_INLINE_NODEBUG EmitterFlags emitter_flags() const noexcept { return _emitter_flags; }
//! Tests whether the emitter inherits from `BaseAssembler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isAssembler() const noexcept { return _emitterType == EmitterType::kAssembler; }
ASMJIT_INLINE_NODEBUG bool is_assembler() const noexcept { return _emitter_type == EmitterType::kAssembler; }
//! Tests whether the emitter inherits from `BaseBuilder`.
//!
//! \note Both Builder and Compiler emitters would return `true`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBuilder() const noexcept { return uint32_t(_emitterType) >= uint32_t(EmitterType::kBuilder); }
ASMJIT_INLINE_NODEBUG bool is_builder() const noexcept { return uint32_t(_emitter_type) >= uint32_t(EmitterType::kBuilder); }
//! Tests whether the emitter inherits from `BaseCompiler`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isCompiler() const noexcept { return _emitterType == EmitterType::kCompiler; }
ASMJIT_INLINE_NODEBUG bool is_compiler() const noexcept { return _emitter_type == EmitterType::kCompiler; }
//! Tests whether the emitter has the given `flag` enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEmitterFlag(EmitterFlags flag) const noexcept { return Support::test(_emitterFlags, flag); }
ASMJIT_INLINE_NODEBUG bool has_emitter_flag(EmitterFlags flag) const noexcept { return Support::test(_emitter_flags, flag); }
//! Tests whether the emitter is finalized.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFinalized() const noexcept { return hasEmitterFlag(EmitterFlags::kFinalized); }
ASMJIT_INLINE_NODEBUG bool is_finalized() const noexcept { return has_emitter_flag(EmitterFlags::kFinalized); }
//! Tests whether the emitter is destroyed (only used during destruction).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDestroyed() const noexcept { return hasEmitterFlag(EmitterFlags::kDestroyed); }
ASMJIT_INLINE_NODEBUG bool is_destroyed() const noexcept { return has_emitter_flag(EmitterFlags::kDestroyed); }
//! \}
@@ -368,8 +368,8 @@ public:
//! \name Internal Functions
//! \{
ASMJIT_INLINE_NODEBUG void _addEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags |= flags; }
ASMJIT_INLINE_NODEBUG void _clearEmitterFlags(EmitterFlags flags) noexcept { _emitterFlags &= _emitterFlags & ~flags; }
ASMJIT_INLINE_NODEBUG void _add_emitter_flags(EmitterFlags flags) noexcept { _emitter_flags |= flags; }
ASMJIT_INLINE_NODEBUG void _clear_emitter_flags(EmitterFlags flags) noexcept { _emitter_flags &= _emitter_flags & ~flags; }
//! \}
//! \endcond
@@ -389,11 +389,11 @@ public:
//! Tests whether the target architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return environment().is32Bit(); }
ASMJIT_INLINE_NODEBUG bool is_32bit() const noexcept { return environment().is_32bit(); }
//! Tests whether the target architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return environment().is64Bit(); }
ASMJIT_INLINE_NODEBUG bool is_64bit() const noexcept { return environment().is_64bit(); }
//! Returns the target architecture type.
[[nodiscard]]
@@ -401,15 +401,15 @@ public:
//! Returns the target architecture sub-type.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return environment().subArch(); }
ASMJIT_INLINE_NODEBUG SubArch sub_arch() const noexcept { return environment().sub_arch(); }
//! Returns the target architecture's GP register size (4 or 8 bytes).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return environment().registerSize(); }
ASMJIT_INLINE_NODEBUG uint32_t register_size() const noexcept { return environment().register_size(); }
//! Returns a signature of a native general purpose register (either 32-bit or 64-bit depending on the architecture).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OperandSignature gpSignature() const noexcept { return _gpSignature; }
ASMJIT_INLINE_NODEBUG OperandSignature gp_signature() const noexcept { return _gp_signature; }
//! Returns instruction alignment.
//!
@@ -418,7 +418,7 @@ public:
//! - AArch32 - instruction alignment is 4 in A32 mode and 2 in THUMB mode.
//! - AArch64 - instruction alignment is 4
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t instructionAlignment() const noexcept { return _instructionAlignment; }
ASMJIT_INLINE_NODEBUG uint32_t instruction_alignment() const noexcept { return _instruction_alignment; }
//! \}
@@ -427,7 +427,7 @@ public:
//! Tests whether the emitter is initialized (i.e. attached to \ref CodeHolder).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept { return _code != nullptr; }
ASMJIT_INLINE_NODEBUG bool is_initialized() const noexcept { return _code != nullptr; }
//! Finalizes this emitter.
//!
@@ -445,14 +445,14 @@ public:
//! Tests whether the emitter has a logger.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasLogger() const noexcept { return _logger != nullptr; }
ASMJIT_INLINE_NODEBUG bool has_logger() const noexcept { return _logger != nullptr; }
//! Tests whether the emitter has its own logger.
//!
//! Own logger means that it overrides the possible logger that may be used by \ref CodeHolder this emitter is
//! attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnLogger() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnLogger); }
ASMJIT_INLINE_NODEBUG bool has_own_logger() const noexcept { return has_emitter_flag(EmitterFlags::kOwnLogger); }
//! Returns the logger this emitter uses.
//!
@@ -464,15 +464,15 @@ public:
//! Sets or resets the logger of the emitter.
//!
//! If the `logger` argument is non-null then the logger will be considered the emitter's own logger, see \ref
//! hasOwnLogger() for more details. If the given `logger` is null then the emitter will automatically use logger
//! has_own_logger() for more details. If the given `logger` is null then the emitter will automatically use logger
//! that is attached to the \ref CodeHolder this emitter is attached to.
ASMJIT_API void setLogger(Logger* logger) noexcept;
ASMJIT_API void set_logger(Logger* logger) noexcept;
//! Resets the logger of this emitter.
//!
//! The emitter will fall back to using a logger attached to \ref CodeHolder this emitter is attached to, or no logger
//! at all if \ref CodeHolder doesn't have one.
ASMJIT_INLINE_NODEBUG void resetLogger() noexcept { return setLogger(nullptr); }
ASMJIT_INLINE_NODEBUG void reset_logger() noexcept { return set_logger(nullptr); }
//! \}
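For example, an emitter can temporarily take its own logger and later fall back to the one owned by CodeHolder. A sketch, assuming an attached emitter `a`; FileLogger is AsmJit's stdio-backed logger:

    FileLogger file_logger(stdout);  // logs formatted instructions to stdout
    a.set_logger(&file_logger);      // sets EmitterFlags::kOwnLogger
    // ... emit code ...
    a.reset_logger();                // back to CodeHolder's logger, if any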
@@ -481,42 +481,42 @@ public:
//! Tests whether the emitter has an error handler attached.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasErrorHandler() const noexcept { return _errorHandler != nullptr; }
ASMJIT_INLINE_NODEBUG bool has_error_handler() const noexcept { return _error_handler != nullptr; }
//! Tests whether the emitter has its own error handler.
//!
//! Own error handler means that it overrides the possible error handler that may be used by \ref CodeHolder this
//! emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOwnErrorHandler() const noexcept { return hasEmitterFlag(EmitterFlags::kOwnErrorHandler); }
ASMJIT_INLINE_NODEBUG bool has_own_error_handler() const noexcept { return has_emitter_flag(EmitterFlags::kOwnErrorHandler); }
//! Returns the error handler this emitter uses.
//!
//! The returned error handler is either the emitter's own error handler or the error handler used by
//! \ref CodeHolder this emitter is attached to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ErrorHandler* errorHandler() const noexcept { return _errorHandler; }
ASMJIT_INLINE_NODEBUG ErrorHandler* error_handler() const noexcept { return _error_handler; }
//! Sets or resets the error handler of the emitter.
ASMJIT_API void setErrorHandler(ErrorHandler* errorHandler) noexcept;
ASMJIT_API void set_error_handler(ErrorHandler* error_handler) noexcept;
//! Resets the error handler.
ASMJIT_INLINE_NODEBUG void resetErrorHandler() noexcept { setErrorHandler(nullptr); }
ASMJIT_INLINE_NODEBUG void reset_error_handler() noexcept { set_error_handler(nullptr); }
//! \cond INTERNAL
ASMJIT_API Error _reportError(Error err, const char* message = nullptr);
ASMJIT_API Error _report_error(Error err, const char* message = nullptr);
//! \endcond
//! Handles the given error in the following way:
//! 1. If the emitter has \ref ErrorHandler attached, it calls its \ref ErrorHandler::handleError() member function
//! first, and then returns the error. The `handleError()` function may throw.
//! 1. If the emitter has \ref ErrorHandler attached, it calls its \ref ErrorHandler::handle_error() member function
//! first, and then returns the error. The `handle_error()` function may throw.
//! 2. if the emitter doesn't have \ref ErrorHandler, the error is simply returned.
ASMJIT_INLINE Error reportError(Error err, const char* message = nullptr) {
Error e = _reportError(err, message);
ASMJIT_INLINE Error report_error(Error err, const char* message = nullptr) {
Error e = _report_error(err, message);
// Static analysis is not working properly without these assumptions.
ASMJIT_ASSUME(e == err);
ASMJIT_ASSUME(e != kErrorOk);
ASMJIT_ASSUME(e != Error::kOk);
return e;
}
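A sketch of the first path: a user-provided handler that observes every reported error. The handle_error() signature matches this diff; the printing body and the PrintingErrorHandler name are illustrative:

    class PrintingErrorHandler : public ErrorHandler {
    public:
      void handle_error(Error err, const char* message, BaseEmitter* origin) override {
        Support::maybe_unused(err, origin);
        printf("AsmJit error: %s\n", message);  // needs <cstdio>
      }
    };

    PrintingErrorHandler eh;
    a.set_error_handler(&eh);  // subsequent report_error() calls reach `eh`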
@@ -528,16 +528,16 @@ public:
//! Returns encoding options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG EncodingOptions encodingOptions() const noexcept { return _encodingOptions; }
ASMJIT_INLINE_NODEBUG EncodingOptions encoding_options() const noexcept { return _encoding_options; }
//! Tests whether the encoding `option` is set.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasEncodingOption(EncodingOptions option) const noexcept { return Support::test(_encodingOptions, option); }
ASMJIT_INLINE_NODEBUG bool has_encoding_option(EncodingOptions option) const noexcept { return Support::test(_encoding_options, option); }
//! Enables the given encoding `options`.
ASMJIT_INLINE_NODEBUG void addEncodingOptions(EncodingOptions options) noexcept { _encodingOptions |= options; }
ASMJIT_INLINE_NODEBUG void add_encoding_options(EncodingOptions options) noexcept { _encoding_options |= options; }
//! Disables the given encoding `options`.
ASMJIT_INLINE_NODEBUG void clearEncodingOptions(EncodingOptions options) noexcept { _encodingOptions &= ~options; }
ASMJIT_INLINE_NODEBUG void clear_encoding_options(EncodingOptions options) noexcept { _encoding_options &= ~options; }
//! \}
@@ -546,11 +546,11 @@ public:
//! Returns the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnosticOptions() const noexcept { return _diagnosticOptions; }
ASMJIT_INLINE_NODEBUG DiagnosticOptions diagnostic_options() const noexcept { return _diagnostic_options; }
//! Tests whether the given `option` is present in the emitter's diagnostic options.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasDiagnosticOption(DiagnosticOptions option) const noexcept { return Support::test(_diagnosticOptions, option); }
ASMJIT_INLINE_NODEBUG bool has_diagnostic_option(DiagnosticOptions option) const noexcept { return Support::test(_diagnostic_options, option); }
//! Activates the given diagnostic `options`.
//!
@@ -570,12 +570,12 @@ public:
//! instruction is ill-formed. In addition, also \ref DiagnosticOptions::kValidateAssembler can be used, which
//! would not be consumed by Builder / Compiler directly, but it would be propagated to an architecture specific
//! \ref BaseAssembler implementation it creates during \ref BaseEmitter::finalize().
ASMJIT_API void addDiagnosticOptions(DiagnosticOptions options) noexcept;
ASMJIT_API void add_diagnostic_options(DiagnosticOptions options) noexcept;
//! Deactivates the given validation `options`.
//!
//! See \ref addDiagnosticOptions() and \ref DiagnosticOptions for more details.
ASMJIT_API void clearDiagnosticOptions(DiagnosticOptions options) noexcept;
//! See \ref add_diagnostic_options() and \ref DiagnosticOptions for more details.
ASMJIT_API void clear_diagnostic_options(DiagnosticOptions options) noexcept;
//! \}
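A minimal sketch of toggling validation at runtime, assuming an attached assembler `a`; kValidateAssembler appears earlier in this diff:

    // Validate each instruction before encoding (slower, but catches
    // ill-formed instructions early instead of emitting garbage).
    a.add_diagnostic_options(DiagnosticOptions::kValidateAssembler);
    // ... emit code that should be checked ...
    a.clear_diagnostic_options(DiagnosticOptions::kValidateAssembler);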
@@ -588,50 +588,50 @@ public:
//! options have some bits reserved that are used for error handling, logging, and instruction validation purposes.
//! Other options are globals that affect each instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions forcedInstOptions() const noexcept { return _forcedInstOptions; }
ASMJIT_INLINE_NODEBUG InstOptions forced_inst_options() const noexcept { return _forced_inst_options; }
//! Returns options of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstOptions instOptions() const noexcept { return _instOptions; }
ASMJIT_INLINE_NODEBUG InstOptions inst_options() const noexcept { return _inst_options; }
//! Sets options of the next instruction.
ASMJIT_INLINE_NODEBUG void setInstOptions(InstOptions options) noexcept { _instOptions = options; }
ASMJIT_INLINE_NODEBUG void set_inst_options(InstOptions options) noexcept { _inst_options = options; }
//! Adds options of the next instruction.
ASMJIT_INLINE_NODEBUG void addInstOptions(InstOptions options) noexcept { _instOptions |= options; }
ASMJIT_INLINE_NODEBUG void add_inst_options(InstOptions options) noexcept { _inst_options |= options; }
//! Resets options of the next instruction.
ASMJIT_INLINE_NODEBUG void resetInstOptions() noexcept { _instOptions = InstOptions::kNone; }
ASMJIT_INLINE_NODEBUG void reset_inst_options() noexcept { _inst_options = InstOptions::kNone; }
//! Tests whether the extra register operand is valid.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
ASMJIT_INLINE_NODEBUG bool has_extra_reg() const noexcept { return _extra_reg.is_reg(); }
//! Returns an extra operand that will be used by the next instruction (architecture specific).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG const RegOnly& extra_reg() const noexcept { return _extra_reg; }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const Reg& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void set_extra_reg(const Reg& reg) noexcept { _extra_reg.init(reg); }
//! Sets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void set_extra_reg(const RegOnly& reg) noexcept { _extra_reg.init(reg); }
//! Resets an extra operand that will be used by the next instruction (architecture specific).
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
ASMJIT_INLINE_NODEBUG void reset_extra_reg() noexcept { _extra_reg.reset(); }
//! Returns comment/annotation of the next instruction.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const char* inlineComment() const noexcept { return _inlineComment; }
ASMJIT_INLINE_NODEBUG const char* inline_comment() const noexcept { return _inline_comment; }
//! Sets comment/annotation of the next instruction.
//!
//! \note This string is set back to null by `_emit()`, but until that it has to remain valid as the Emitter is not
//! required to make a copy of it (and it would be slow to do that for each instruction).
ASMJIT_INLINE_NODEBUG void setInlineComment(const char* s) noexcept { _inlineComment = s; }
ASMJIT_INLINE_NODEBUG void set_inline_comment(const char* s) noexcept { _inline_comment = s; }
//! Resets the comment/annotation to nullptr.
ASMJIT_INLINE_NODEBUG void resetInlineComment() noexcept { _inlineComment = nullptr; }
ASMJIT_INLINE_NODEBUG void reset_inline_comment() noexcept { _inline_comment = nullptr; }
//! \}
@@ -645,10 +645,10 @@ public:
//! which is set explicitly, then the state would contain it. This allows mimicking the syntax of assemblers such
//! as X86. For example `rep().movs(...)` would map to a `REP MOVS` instruction on X86. The same applies to various
//! hints and the use of a mask register in AVX-512 mode.
ASMJIT_INLINE_NODEBUG void resetState() noexcept {
resetInstOptions();
resetExtraReg();
resetInlineComment();
ASMJIT_INLINE_NODEBUG void reset_state() noexcept {
reset_inst_options();
reset_extra_reg();
reset_inline_comment();
}
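A sketch of the one-shot nature of this state, assuming an attached x86 emitter `a` (x86::Inst::kIdNop comes from AsmJit's x86 namespace):

    a.set_inline_comment("hot path");  // applies to exactly one instruction
    a.emit(x86::Inst::kIdNop);         // comment consumed here, state reset by _emit()
    a.emit(x86::Inst::kIdNop);         // emitted without any comment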
//! \cond INTERNAL
@@ -656,9 +656,9 @@ public:
//! Grabs the current emitter state and resets the emitter state at the same time, returning the state the emitter
//! had before the state was reset.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG State _grabState() noexcept {
State s{_instOptions | _forcedInstOptions, _extraReg, _inlineComment};
resetState();
ASMJIT_INLINE_NODEBUG State _grab_state() noexcept {
State s{_inst_options | _forced_inst_options, _extra_reg, _inline_comment};
reset_state();
return s;
}
//! \endcond
@@ -680,19 +680,37 @@ public:
//! Creates a new label.
[[nodiscard]]
ASMJIT_API virtual Label newLabel();
ASMJIT_API virtual Label new_label();
//! Creates a new named label.
[[nodiscard]]
ASMJIT_API virtual Label newNamedLabel(const char* name, size_t nameSize = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parentId = Globals::kInvalidId);
ASMJIT_API virtual Label new_named_label(const char* name, size_t name_size = SIZE_MAX, LabelType type = LabelType::kGlobal, uint32_t parent_id = Globals::kInvalidId);
//! \overload
[[nodiscard]]
ASMJIT_INLINE Label new_named_label(Span<const char> name, LabelType type = LabelType::kGlobal, uint32_t parent_id = Globals::kInvalidId) {
return new_named_label(name.data(), name.size(), type, parent_id);
}
//! Creates a new anonymous label with a name, which can only be used for debugging purposes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newAnonymousLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kAnonymous); }
ASMJIT_INLINE_NODEBUG Label new_anonymous_label(const char* name, size_t name_size = SIZE_MAX) {
return new_named_label(name, name_size, LabelType::kAnonymous);
}
//! \overload
[[nodiscard]]
ASMJIT_INLINE Label new_anonymous_label(Span<const char> name) { return new_anonymous_label(name.data(), name.size()); }
//! Creates a new external label.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Label newExternalLabel(const char* name, size_t nameSize = SIZE_MAX) { return newNamedLabel(name, nameSize, LabelType::kExternal); }
ASMJIT_INLINE_NODEBUG Label new_external_label(const char* name, size_t name_size = SIZE_MAX) {
return new_named_label(name, name_size, LabelType::kExternal);
}
//! \overload
[[nodiscard]]
ASMJIT_INLINE Label new_external_label(Span<const char> name) { return new_external_label(name.data(), name.size()); }
//! Returns `Label` by `name`.
//!
@@ -701,7 +719,13 @@ public:
//! \note This function doesn't trigger ErrorHandler in case the name is invalid or no such label exists. You must
//! always check the validity of the `Label` returned.
[[nodiscard]]
ASMJIT_API Label labelByName(const char* name, size_t nameSize = SIZE_MAX, uint32_t parentId = Globals::kInvalidId) noexcept;
ASMJIT_API Label label_by_name(const char* name, size_t name_size = SIZE_MAX, uint32_t parent_id = Globals::kInvalidId) noexcept;
//! \overload
[[nodiscard]]
ASMJIT_API Label label_by_name(Span<const char> name, uint32_t parent_id = Globals::kInvalidId) noexcept {
return label_by_name(name.data(), name.size(), parent_id);
}
//! Binds the `label` to the current position of the current section.
//!
@@ -710,11 +734,11 @@ public:
//! Tests whether the label `id` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_API bool isLabelValid(uint32_t labelId) const noexcept;
ASMJIT_API bool is_label_valid(uint32_t label_id) const noexcept;
//! Tests whether the `label` is valid (i.e. registered).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLabelValid(const Label& label) const noexcept { return isLabelValid(label.id()); }
ASMJIT_INLINE_NODEBUG bool is_label_valid(const Label& label) const noexcept { return is_label_valid(label.id()); }
//! \}
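A short sketch tying the label functions together, assuming an attached emitter `a`:

    Label loop = a.new_label();                // anonymous, not bound yet
    Label entry = a.new_named_label("entry");  // named, LabelType::kGlobal by default
    a.bind(loop);                              // bound to the current position
    // ... emit the loop body ...
    if (!a.is_label_valid(entry)) {
      // creation failed (e.g. duplicate name) - `entry` has an invalid id
    }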
@@ -723,48 +747,48 @@ public:
// NOTE: These `emit()` helpers are designed to address code bloat generated by C++ compilers when calling a function
// having many arguments. Each parameter to `_emit()` requires some code to pass it, which means that if we default
// to 5 arguments in `_emit()` and `instId`, the C++ compiler would have to generate a virtual function call having 5
// to 5 arguments in `_emit()` and `inst_id`, the C++ compiler would have to generate a virtual function call having 5
// parameters and additional `this` argument, which is quite a lot. Since by default most instructions have 2 to 3
// operands it's better to introduce helpers that pass from 0 to 6 operands that help to reduce the size of emit(...)
// function call.
//! Emits an instruction (internal).
ASMJIT_API Error _emitI(InstId instId);
ASMJIT_API Error _emitI(InstId inst_id);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0);
ASMJIT_API Error _emitI(InstId inst_id, const Operand_& o0);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1);
ASMJIT_API Error _emitI(InstId inst_id, const Operand_& o0, const Operand_& o1);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2);
ASMJIT_API Error _emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
ASMJIT_API Error _emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
ASMJIT_API Error _emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4);
//! \overload
ASMJIT_API Error _emitI(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
ASMJIT_API Error _emitI(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_& o3, const Operand_& o4, const Operand_& o5);
//! Emits an instruction `instId` with the given `operands`.
//! Emits an instruction `inst_id` with the given `operands`.
//!
//! This is the most universal way of emitting code, which accepts an instruction identifier and instruction
//! operands. This is called an "unchecked" API as emit doesn't provide any type checks at compile-time. This
//! allows to emit instruction with just \ref Operand instances, which could be handy in some cases - for
//! example emitting generic code where you don't know whether some operand is register, memory, or immediate.
template<typename... Args>
ASMJIT_INLINE_NODEBUG Error emit(InstId instId, Args&&... operands) {
return _emitI(instId, Support::ForwardOp<Args>::forward(operands)...);
ASMJIT_INLINE_NODEBUG Error emit(InstId inst_id, Args&&... operands) {
return _emitI(inst_id, Support::ForwardOp<Args>::forward(operands)...);
}
//! Similar to \ref emit(), but uses array of `operands` instead.
ASMJIT_INLINE_NODEBUG Error emitOpArray(InstId instId, const Operand_* operands, size_t opCount) {
return _emitOpArray(instId, operands, opCount);
ASMJIT_INLINE_NODEBUG Error emit_op_array(InstId inst_id, const Operand_* operands, size_t op_count) {
return _emit_op_array(inst_id, operands, op_count);
}
//! Similar to \ref emit(), but emits instruction with both instruction options and extra register, followed
//! by an array of `operands`.
ASMJIT_INLINE Error emitInst(const BaseInst& inst, const Operand_* operands, size_t opCount) {
setInstOptions(inst.options());
setExtraReg(inst.extraReg());
return _emitOpArray(inst.id(), operands, opCount);
ASMJIT_INLINE Error emit_inst(const BaseInst& inst, const Operand_* operands, size_t op_count) {
set_inst_options(inst.options());
set_extra_reg(inst.extra_reg());
return _emit_op_array(inst.inst_id(), operands, op_count);
}
//! \}
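For illustration, the same x86 instruction emitted through both entry points (a sketch; x86::Inst::kIdAdd and the register operands come from AsmJit's x86 namespace):

    Operand ops[2] = { x86::eax, x86::ebx };
    a.emit(x86::Inst::kIdAdd, x86::eax, x86::ebx);  // variadic helper -> _emitI()
    a.emit_op_array(x86::Inst::kIdAdd, ops, 2);     // array form -> _emit_op_array()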
@@ -774,9 +798,9 @@ public:
//! \{
//! Emits an instruction - all 6 operands must be defined.
ASMJIT_API virtual Error _emit(InstId instId, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* oExt);
ASMJIT_API virtual Error _emit(InstId inst_id, const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext);
//! Emits instruction having operands stored in array.
ASMJIT_API virtual Error _emitOpArray(InstId instId, const Operand_* operands, size_t opCount);
ASMJIT_API virtual Error _emit_op_array(InstId inst_id, const Operand_* operands, size_t op_count);
//! \}
//! \endcond
@@ -785,11 +809,11 @@ public:
//! \{
//! Emits a function prolog described by the given function `frame`.
ASMJIT_API Error emitProlog(const FuncFrame& frame);
ASMJIT_API Error emit_prolog(const FuncFrame& frame);
//! Emits a function epilog described by the given function `frame`.
ASMJIT_API Error emitEpilog(const FuncFrame& frame);
ASMJIT_API Error emit_epilog(const FuncFrame& frame);
//! Emits code that reassigns function `frame` arguments to the given `args`.
ASMJIT_API Error emitArgsAssignment(const FuncFrame& frame, const FuncArgsAssignment& args);
ASMJIT_API Error emit_args_assignment(const FuncFrame& frame, const FuncArgsAssignment& args);
//! \}
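A sketch of the typical sequence. Caveat: the FuncDetail/FuncFrame/FuncArgsAssignment member names below are assumed snake_case counterparts of the previous API; only emit_prolog(), emit_epilog(), and emit_args_assignment() are confirmed by this diff:

    FuncDetail func;
    func.init(FuncSignature::build<int, int, int>(), code.environment());
    FuncFrame frame;
    frame.init(func);                // assumed name
    FuncArgsAssignment args(&func);  // where arguments should end up
    // ... args.assign_all(...) - assumed counterpart of assignAll()
    args.update_func_frame(frame);   // assumed name
    frame.finalize();                // assumed name

    a.emit_prolog(frame);
    a.emit_args_assignment(frame, args);
    // ... function body ...
    a.emit_epilog(frame);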
@@ -801,7 +825,7 @@ public:
//! The sequence that is used to fill the gap between the aligned location and the current location depends on the
//! align `mode`, see \ref AlignMode. The `alignment` argument specifies alignment in bytes, so for example when
//! it's `32` it means that the code buffer will be aligned to `32` bytes.
ASMJIT_API virtual Error align(AlignMode alignMode, uint32_t alignment);
ASMJIT_API virtual Error align(AlignMode align_mode, uint32_t alignment);
//! \}
@@ -809,55 +833,55 @@ public:
//! \{
//! Embeds raw data into the \ref CodeBuffer.
ASMJIT_API virtual Error embed(const void* data, size_t dataSize);
ASMJIT_API virtual Error embed(const void* data, size_t data_size);
//! Embeds a typed data array.
//!
//! This is the most flexible function for embedding data as it allows to:
//!
//! - Assign a `typeId` to the data, so the emitter knows the type of items stored in `data`. Binary data should
//! - Assign a `type_id` to the data, so the emitter knows the type of items stored in `data`. Binary data should
//! use \ref TypeId::kUInt8.
//!
//! - Repeat the given data `repeatCount` times, so the data can be used as a fill pattern for example, or as a
//! - Repeat the given data `repeat_count` times, so the data can be used as a fill pattern for example, or as a
//! pattern used by SIMD instructions.
ASMJIT_API virtual Error embedDataArray(TypeId typeId, const void* data, size_t itemCount, size_t repeatCount = 1);
ASMJIT_API virtual Error embed_data_array(TypeId type_id, const void* data, size_t item_count, size_t repeat_count = 1);
//! Embeds int8_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt8(int8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt8, &value, 1, repeatCount); }
//! Embeds uint8_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt8(uint8_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt8, &value, 1, repeatCount); }
//! Embeds int16_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt16(int16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt16, &value, 1, repeatCount); }
//! Embeds uint16_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt16(uint16_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt16, &value, 1, repeatCount); }
//! Embeds int32_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt32(int32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt32, &value, 1, repeatCount); }
//! Embeds uint32_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt32(uint32_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt32, &value, 1, repeatCount); }
//! Embeds int64_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedInt64(int64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kInt64, &value, 1, repeatCount); }
//! Embeds uint64_t `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedUInt64(uint64_t value, size_t repeatCount = 1) { return embedDataArray(TypeId::kUInt64, &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedFloat(float value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<float>::kTypeId), &value, 1, repeatCount); }
//! Embeds a floating point `value` repeated by `repeatCount`.
ASMJIT_INLINE_NODEBUG Error embedDouble(double value, size_t repeatCount = 1) { return embedDataArray(TypeId(TypeUtils::TypeIdOfT<double>::kTypeId), &value, 1, repeatCount); }
//! Embeds int8_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_int8(int8_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kInt8, &value, 1, repeat_count); }
//! Embeds uint8_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_uint8(uint8_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kUInt8, &value, 1, repeat_count); }
//! Embeds int16_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_int16(int16_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kInt16, &value, 1, repeat_count); }
//! Embeds uint16_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_uint16(uint16_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kUInt16, &value, 1, repeat_count); }
//! Embeds int32_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_int32(int32_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kInt32, &value, 1, repeat_count); }
//! Embeds uint32_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_uint32(uint32_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kUInt32, &value, 1, repeat_count); }
//! Embeds int64_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_int64(int64_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kInt64, &value, 1, repeat_count); }
//! Embeds uint64_t `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_uint64(uint64_t value, size_t repeat_count = 1) { return embed_data_array(TypeId::kUInt64, &value, 1, repeat_count); }
//! Embeds a floating point `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_float(float value, size_t repeat_count = 1) { return embed_data_array(TypeId(TypeUtils::TypeIdOfT<float>::kTypeId), &value, 1, repeat_count); }
//! Embeds a floating point `value` repeated by `repeat_count`.
ASMJIT_INLINE_NODEBUG Error embed_double(double value, size_t repeat_count = 1) { return embed_data_array(TypeId(TypeUtils::TypeIdOfT<double>::kTypeId), &value, 1, repeat_count); }
//! Embeds a constant pool at the current offset by performing the following:
//! 1. Aligns by using AlignMode::kData to the minimum `pool` alignment.
//! 2. Binds the ConstPool label so it's bound to an aligned location.
//! 3. Emits ConstPool content.
ASMJIT_API virtual Error embedConstPool(const Label& label, const ConstPool& pool);
ASMJIT_API virtual Error embed_const_pool(const Label& label, const ConstPool& pool);
//! Embeds an absolute `label` address as data.
//!
//! The `dataSize` is an optional argument that can be used to specify the size of the address data. If it's zero
//! The `data_size` is an optional argument that can be used to specify the size of the address data. If it's zero
//! (default) the address size is deduced from the target architecture (either 4 or 8 bytes).
ASMJIT_API virtual Error embedLabel(const Label& label, size_t dataSize = 0);
ASMJIT_API virtual Error embed_label(const Label& label, size_t data_size = 0);
//! Embeds a delta (distance) between the `label` and `base` calculating it as `label - base`. This function was
//! designed to make it easier to embed lookup tables where each index is a relative distance of two labels.
ASMJIT_API virtual Error embedLabelDelta(const Label& label, const Label& base, size_t dataSize = 0);
ASMJIT_API virtual Error embed_label_delta(const Label& label, const Label& base, size_t data_size = 0);
//! \}
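For example, a small relative lookup table built from these primitives (a sketch, assuming an attached emitter `a` and previously created case labels):

    Label table = a.new_label();
    a.bind(table);                         // table base, aligned by the caller
    a.embed_label_delta(case0, table, 4);  // int32 distance: case0 - table
    a.embed_label_delta(case1, table, 4);  // int32 distance: case1 - table
    a.embed_uint32(0, 2);                  // two zero entries as padding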
@@ -878,26 +902,26 @@ public:
//! \{
//! Called after the emitter was attached to `CodeHolder`.
ASMJIT_API virtual Error onAttach(CodeHolder& code) noexcept;
ASMJIT_API virtual Error on_attach(CodeHolder& code) noexcept;
//! Called after the emitter was detached from `CodeHolder`.
ASMJIT_API virtual Error onDetach(CodeHolder& code) noexcept;
ASMJIT_API virtual Error on_detach(CodeHolder& code) noexcept;
//! Called when CodeHolder is reinitialized when the emitter is attached.
ASMJIT_API virtual Error onReinit(CodeHolder& code) noexcept;
ASMJIT_API virtual Error on_reinit(CodeHolder& code) noexcept;
//! Called when \ref CodeHolder has updated an important setting, which involves the following:
//!
//! - \ref Logger has been changed (\ref CodeHolder::setLogger() has been called).
//! - \ref Logger has been changed (\ref CodeHolder::set_logger() has been called).
//!
//! - \ref ErrorHandler has been changed (\ref CodeHolder::setErrorHandler() has been called).
//! - \ref ErrorHandler has been changed (\ref CodeHolder::set_error_handler() has been called).
//!
//! This function ensures that the settings are properly propagated from \ref CodeHolder to the emitter.
//!
//! \note This function is virtual and can be overridden, however, if you do so, always call \ref
//! BaseEmitter::onSettingsUpdated() within your own implementation to ensure that the emitter is
//! BaseEmitter::on_settings_updated() within your own implementation to ensure that the emitter is
//! in a consistent state.
ASMJIT_API virtual void onSettingsUpdated() noexcept;
ASMJIT_API virtual void on_settings_updated() noexcept;
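// A sketch of overriding the settings hook (assumes a concrete emitter such as
// x86::Assembler); always delegate to the base implementation as the note
// above requires:
//
//   class MyAssembler : public x86::Assembler {
//   public:
//     using x86::Assembler::Assembler;
//
//     void on_settings_updated() noexcept override {
//       x86::Assembler::on_settings_updated(); // keep emitter state consistent.
//       // ... react to logger / error handler changes here ...
//     }
//   };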
//! \}
};

View File

@@ -16,18 +16,18 @@ namespace EmitterUtils {
#ifndef ASMJIT_NO_LOGGING
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept {
ASMJIT_ASSERT(binSize >= offsetSize);
Error finish_formatted_line(String& sb, const FormatOptions& format_options, const uint8_t* bin_data, size_t bin_size, size_t offset_size, size_t imm_size, const char* comment) noexcept {
ASMJIT_ASSERT(bin_size >= offset_size);
const size_t kNoBinSize = SIZE_MAX;
size_t commentSize = comment ? Support::strLen(comment, Globals::kMaxCommentSize) : 0;
size_t comment_size = comment ? Support::str_nlen(comment, Globals::kMaxCommentSize) : 0;
if ((binSize != 0 && binSize != kNoBinSize) || commentSize) {
if ((bin_size != 0 && bin_size != kNoBinSize) || comment_size) {
char sep = ';';
size_t padding = Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kRegularLine);
size_t padding = Formatter::padding_from_options(format_options, FormatPaddingGroup::kRegularLine);
for (size_t i = (binSize == kNoBinSize); i < 2; i++) {
ASMJIT_PROPAGATE(sb.padEnd(padding));
for (size_t i = (bin_size == kNoBinSize); i < 2; i++) {
ASMJIT_PROPAGATE(sb.pad_end(padding));
if (sep) {
ASMJIT_PROPAGATE(sb.append(sep));
@@ -36,90 +36,90 @@ Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const
// Append binary data or comment.
if (i == 0) {
ASMJIT_PROPAGATE(sb.appendHex(binData, binSize - offsetSize - immSize));
ASMJIT_PROPAGATE(sb.appendChars('.', offsetSize * 2));
ASMJIT_PROPAGATE(sb.appendHex(binData + binSize - immSize, immSize));
if (commentSize == 0) break;
ASMJIT_PROPAGATE(sb.append_hex(bin_data, bin_size - offset_size - imm_size));
ASMJIT_PROPAGATE(sb.append_chars('.', offset_size * 2));
ASMJIT_PROPAGATE(sb.append_hex(bin_data + bin_size - imm_size, imm_size));
if (comment_size == 0) break;
}
else {
ASMJIT_PROPAGATE(sb.append(comment, commentSize));
ASMJIT_PROPAGATE(sb.append(comment, comment_size));
}
sep = '|';
padding += Formatter::paddingFromOptions(formatOptions, FormatPaddingGroup::kMachineCode);
padding += Formatter::padding_from_options(format_options, FormatPaddingGroup::kMachineCode);
}
}
return sb.append('\n');
}
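// A sketch of the line layout this helper produces when both machine code and
// a comment are present (padding widths come from FormatOptions and are
// illustrative; ';' separates code from bytes, '|' separates bytes from comment):
//
//   mov eax, ebx                ; 8BC3                 | user comment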
void logLabelBound(BaseAssembler* self, const Label& label) noexcept {
void log_label_bound(BaseAssembler* self, const Label& label) noexcept {
Logger* logger = self->logger();
StringTmp<512> sb;
size_t binSize = logger->hasFlag(FormatFlags::kMachineCode) ? size_t(0) : SIZE_MAX;
size_t bin_size = logger->has_flag(FormatFlags::kMachineCode) ? size_t(0) : SIZE_MAX;
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kLabel));
Formatter::formatLabel(sb, logger->flags(), self, label.id());
sb.append_chars(' ', logger->indentation(FormatIndentationGroup::kLabel));
Formatter::format_label(sb, logger->flags(), self, label.id());
sb.append(':');
finishFormattedLine(sb, logger->options(), nullptr, binSize, 0, 0, self->_inlineComment);
finish_formatted_line(sb, logger->options(), nullptr, bin_size, 0, 0, self->_inline_comment);
logger->log(sb.data(), sb.size());
}
void logInstructionEmitted(
void log_instruction_emitted(
BaseAssembler* self,
InstId instId,
InstId inst_id,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor) {
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext,
uint32_t rel_size, uint32_t imm_size, uint8_t* after_cursor) {
Logger* logger = self->logger();
ASMJIT_ASSERT(logger != nullptr);
StringTmp<256> sb;
FormatFlags formatFlags = logger->flags();
FormatFlags format_flags = logger->flags();
uint8_t* beforeCursor = self->bufferPtr();
intptr_t emittedSize = (intptr_t)(afterCursor - beforeCursor);
uint8_t* before_cursor = self->buffer_ptr();
intptr_t emitted_size = (intptr_t)(after_cursor - before_cursor);
Operand_ opArray[Globals::kMaxOpCount];
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
Operand_ op_array[Globals::kMaxOpCount];
op_array_from_emit_args(op_array, o0, o1, o2, op_ext);
sb.appendChars(' ', logger->indentation(FormatIndentationGroup::kCode));
self->_funcs.formatInstruction(sb, formatFlags, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
sb.append_chars(' ', logger->indentation(FormatIndentationGroup::kCode));
self->_funcs.format_instruction(sb, format_flags, self, self->arch(), BaseInst(inst_id, options, self->extra_reg()), Span<Operand_>(op_array, Globals::kMaxOpCount));
if (Support::test(formatFlags, FormatFlags::kMachineCode)) {
finishFormattedLine(sb, logger->options(), self->bufferPtr(), size_t(emittedSize), relSize, immSize, self->inlineComment());
if (Support::test(format_flags, FormatFlags::kMachineCode)) {
finish_formatted_line(sb, logger->options(), self->buffer_ptr(), size_t(emitted_size), rel_size, imm_size, self->inline_comment());
}
else {
finishFormattedLine(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inlineComment());
finish_formatted_line(sb, logger->options(), nullptr, SIZE_MAX, 0, 0, self->inline_comment());
}
logger->log(sb);
}
Error logInstructionFailed(
Error log_instruction_failed(
BaseEmitter* self,
Error err,
InstId instId,
InstId inst_id,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) {
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext) {
StringTmp<256> sb;
sb.append(DebugUtils::errorAsString(err));
sb.append(DebugUtils::error_as_string(err));
sb.append(": ");
Operand_ opArray[Globals::kMaxOpCount];
opArrayFromEmitArgs(opArray, o0, o1, o2, opExt);
Operand_ op_array[Globals::kMaxOpCount];
op_array_from_emit_args(op_array, o0, o1, o2, op_ext);
self->_funcs.formatInstruction(sb, FormatFlags::kRegType, self, self->arch(), BaseInst(instId, options, self->extraReg()), opArray, Globals::kMaxOpCount);
self->_funcs.format_instruction(sb, FormatFlags::kRegType, self, self->arch(), BaseInst(inst_id, options, self->extra_reg()), Span<Operand_>(op_array, Globals::kMaxOpCount));
if (self->inlineComment()) {
if (self->inline_comment()) {
sb.append(" ; ");
sb.append(self->inlineComment());
sb.append(self->inline_comment());
}
self->resetState();
return self->reportError(err, sb.data());
self->reset_state();
return self->report_error(err, sb.data());
}
#endif

View File

@@ -23,7 +23,7 @@ namespace EmitterUtils {
//! Default paddings used by Emitter utils and Formatter.
static constexpr Operand noExt[3] = { {}, {}, {} };
static constexpr Operand no_ext[3] = { {}, {}, {} };
enum kOpIndex : uint32_t {
kOp3 = 0,
@@ -32,51 +32,51 @@ enum kOpIndex : uint32_t {
};
[[nodiscard]]
static ASMJIT_INLINE uint32_t opCountFromEmitArgs(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
uint32_t opCount = 0;
static ASMJIT_INLINE uint32_t op_count_from_emit_args(const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext) noexcept {
uint32_t op_count = 0;
if (opExt[kOp3].isNone()) {
if (!o0.isNone()) opCount = 1;
if (!o1.isNone()) opCount = 2;
if (!o2.isNone()) opCount = 3;
if (op_ext[kOp3].is_none()) {
if (!o0.is_none()) op_count = 1;
if (!o1.is_none()) op_count = 2;
if (!o2.is_none()) op_count = 3;
}
else {
opCount = 4;
if (!opExt[kOp4].isNone()) {
opCount = 5 + uint32_t(!opExt[kOp5].isNone());
op_count = 4;
if (!op_ext[kOp4].is_none()) {
op_count = 5 + uint32_t(!op_ext[kOp5].is_none());
}
}
return opCount;
return op_count;
}
static ASMJIT_INLINE void opArrayFromEmitArgs(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt) noexcept {
dst[0].copyFrom(o0);
dst[1].copyFrom(o1);
dst[2].copyFrom(o2);
dst[3].copyFrom(opExt[kOp3]);
dst[4].copyFrom(opExt[kOp4]);
dst[5].copyFrom(opExt[kOp5]);
static ASMJIT_INLINE void op_array_from_emit_args(Operand_ dst[Globals::kMaxOpCount], const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext) noexcept {
dst[0].copy_from(o0);
dst[1].copy_from(o1);
dst[2].copy_from(o2);
dst[3].copy_from(op_ext[kOp3]);
dst[4].copy_from(op_ext[kOp4]);
dst[5].copy_from(op_ext[kOp5]);
}
#ifndef ASMJIT_NO_LOGGING
Error finishFormattedLine(String& sb, const FormatOptions& formatOptions, const uint8_t* binData, size_t binSize, size_t offsetSize, size_t immSize, const char* comment) noexcept;
Error finish_formatted_line(String& sb, const FormatOptions& format_options, const uint8_t* bin_data, size_t bin_size, size_t offset_size, size_t imm_size, const char* comment) noexcept;
void logLabelBound(BaseAssembler* self, const Label& label) noexcept;
void log_label_bound(BaseAssembler* self, const Label& label) noexcept;
void logInstructionEmitted(
void log_instruction_emitted(
BaseAssembler* self,
InstId instId,
InstId inst_id,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt,
uint32_t relSize, uint32_t immSize, uint8_t* afterCursor);
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext,
uint32_t rel_size, uint32_t imm_size, uint8_t* after_cursor);
Error logInstructionFailed(
Error log_instruction_failed(
BaseEmitter* self,
Error err,
InstId instId,
InstId inst_id,
InstOptions options,
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* opExt);
const Operand_& o0, const Operand_& o1, const Operand_& o2, const Operand_* op_ext);
#endif
}

View File

@@ -21,21 +21,21 @@ ASMJIT_BEGIN_NAMESPACE
//
// - 32-bit - Stack must be aligned to 8 bytes.
// - 64-bit - Stack must be aligned to 16 bytes (hardware requirement).
uint32_t Environment::stackAlignment() const noexcept {
if (is64Bit()) {
uint32_t Environment::stack_alignment() const noexcept {
if (is_64bit()) {
// Assume 16-byte alignment on any 64-bit target.
return 16;
}
else {
// The following platforms use 16-byte alignment in 32-bit mode.
if (isPlatformLinux() ||
isPlatformBSD() ||
isPlatformApple() ||
isPlatformHaiku()) {
if (is_platform_linux() ||
is_platform_bsd() ||
is_platform_apple() ||
is_platform_haiku()) {
return 16u;
}
if (isFamilyARM()) {
if (is_family_arm()) {
return 8;
}

View File

@@ -205,17 +205,17 @@ public:
//! Architecture.
Arch _arch = Arch::kUnknown;
//! Sub-architecture type.
SubArch _subArch = SubArch::kUnknown;
SubArch _sub_arch = SubArch::kUnknown;
//! Vendor type.
Vendor _vendor = Vendor::kUnknown;
//! Platform.
Platform _platform = Platform::kUnknown;
//! Platform ABI.
PlatformABI _platformABI = PlatformABI::kUnknown;
PlatformABI _platform_abi = PlatformABI::kUnknown;
//! Object format.
ObjectFormat _objectFormat = ObjectFormat::kUnknown;
ObjectFormat _object_format = ObjectFormat::kUnknown;
//! Floating point ABI.
FloatABI _floatABI = FloatABI::kHardFloat;
FloatABI _float_abi = FloatABI::kHardFloat;
//! Reserved for future use, must be zero.
uint8_t _reserved = 0;
@@ -229,23 +229,23 @@ public:
//! Creates a copy of `other` instance.
ASMJIT_INLINE_CONSTEXPR Environment(const Environment& other) noexcept = default;
//! Creates \ref Environment initialized to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
//! and `floatABI`.
//! Creates \ref Environment initialized to `arch`, `sub_arch`, `vendor`, `platform`, `platform_abi`, `object_format`,
//! and `float_abi`.
ASMJIT_INLINE_CONSTEXPR explicit Environment(
Arch arch,
SubArch subArch = SubArch::kUnknown,
SubArch sub_arch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI platformABI = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown,
FloatABI floatABI = FloatABI::kHardFloat) noexcept
PlatformABI platform_abi = PlatformABI::kUnknown,
ObjectFormat object_format = ObjectFormat::kUnknown,
FloatABI float_abi = FloatABI::kHardFloat) noexcept
: _arch(arch),
_subArch(subArch),
_sub_arch(sub_arch),
_vendor(vendor),
_platform(platform),
_platformABI(platformABI),
_objectFormat(objectFormat),
_floatABI(floatABI) {}
_platform_abi(platform_abi),
_object_format(object_format),
_float_abi(float_abi) {}
//! Returns the host environment constructed from preprocessor macros defined by the compiler.
//!
@@ -277,7 +277,7 @@ public:
//!
//! Returns true if all members are zero, and thus unknown.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool empty() const noexcept {
ASMJIT_INLINE_NODEBUG bool is_empty() const noexcept {
// Unfortunately, compilers won't optimize this if the fields are checked one by one, so compare the packed value...
return _packed() == 0;
}
@@ -285,7 +285,7 @@ public:
//! Tests whether the environment is initialized, which means it must have
//! a valid architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isInitialized() const noexcept {
ASMJIT_INLINE_NODEBUG bool is_initialized() const noexcept {
return _arch != Arch::kUnknown;
}
@@ -309,7 +309,7 @@ public:
//! Returns the sub-architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG SubArch subArch() const noexcept { return _subArch; }
ASMJIT_INLINE_NODEBUG SubArch sub_arch() const noexcept { return _sub_arch; }
//! Returns vendor.
[[nodiscard]]
@@ -321,132 +321,132 @@ public:
//! Returns target's ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG PlatformABI platformABI() const noexcept { return _platformABI; }
ASMJIT_INLINE_NODEBUG PlatformABI platform_abi() const noexcept { return _platform_abi; }
//! Returns target's object format.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG ObjectFormat objectFormat() const noexcept { return _objectFormat; }
ASMJIT_INLINE_NODEBUG ObjectFormat object_format() const noexcept { return _object_format; }
//! Returns floating point ABI.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG FloatABI floatABI() const noexcept { return _floatABI; }
ASMJIT_INLINE_NODEBUG FloatABI float_abi() const noexcept { return _float_abi; }
//! Initializes \ref Environment to `arch`, `subArch`, `vendor`, `platform`, `platformABI`, `objectFormat`,
//! and `floatABI`.
//! Initializes \ref Environment to `arch`, `sub_arch`, `vendor`, `platform`, `platform_abi`, `object_format`,
//! and `float_abi`.
inline void init(
Arch arch,
SubArch subArch = SubArch::kUnknown,
SubArch sub_arch = SubArch::kUnknown,
Vendor vendor = Vendor::kUnknown,
Platform platform = Platform::kUnknown,
PlatformABI platformABI = PlatformABI::kUnknown,
ObjectFormat objectFormat = ObjectFormat::kUnknown,
FloatABI floatABI = FloatABI::kHardFloat) noexcept {
PlatformABI platform_abi = PlatformABI::kUnknown,
ObjectFormat object_format = ObjectFormat::kUnknown,
FloatABI float_abi = FloatABI::kHardFloat) noexcept {
_arch = arch;
_subArch = subArch;
_sub_arch = sub_arch;
_vendor = vendor;
_platform = platform;
_platformABI = platformABI;
_objectFormat = objectFormat;
_floatABI = floatABI;
_platform_abi = platform_abi;
_object_format = object_format;
_float_abi = float_abi;
_reserved = 0;
}
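// A sketch of describing a 64-bit Linux/ELF target explicitly (enumerator
// names such as ObjectFormat::kELF are assumed to exist in this version):
//
//   Environment env(
//     Arch::kX64,
//     SubArch::kUnknown,
//     Vendor::kUnknown,
//     Platform::kLinux,
//     PlatformABI::kGNU,
//     ObjectFormat::kELF,
//     FloatABI::kHardFloat);
//
//   // 64-bit targets report 16-byte stack alignment (see stack_alignment()).
//   uint32_t alignment = env.stack_alignment();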
//! Tests whether this environment describes a 32-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX86() const noexcept { return _arch == Arch::kX86; }
ASMJIT_INLINE_NODEBUG bool is_arch_x86() const noexcept { return _arch == Arch::kX86; }
//! Tests whether this environment describes a 64-bit X86.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchX64() const noexcept { return _arch == Arch::kX64; }
ASMJIT_INLINE_NODEBUG bool is_arch_x64() const noexcept { return _arch == Arch::kX64; }
//! Tests whether this environment describes a 32-bit ARM.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchARM() const noexcept { return isArchARM(_arch); }
ASMJIT_INLINE_NODEBUG bool is_arch_arm() const noexcept { return is_arch_arm(_arch); }
//! Tests whether this environment describes a 32-bit ARM in THUMB mode.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchThumb() const noexcept { return isArchThumb(_arch); }
ASMJIT_INLINE_NODEBUG bool is_arch_thumb() const noexcept { return is_arch_thumb(_arch); }
//! Tests whether this environment describes a 64-bit AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchAArch64() const noexcept { return isArchAArch64(_arch); }
ASMJIT_INLINE_NODEBUG bool is_arch_aarch64() const noexcept { return is_arch_aarch64(_arch); }
//! Tests whether this environment describes a 32-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS32() const noexcept { return isArchMIPS32(_arch); }
ASMJIT_INLINE_NODEBUG bool is_arch_mips32() const noexcept { return is_arch_mips32(_arch); }
//! Tests whether this environment describes a 64-bit MIPS.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchMIPS64() const noexcept { return isArchMIPS64(_arch); }
ASMJIT_INLINE_NODEBUG bool is_arch_mips64() const noexcept { return is_arch_mips64(_arch); }
//! Tests whether this environment describes a 32-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV32() const noexcept { return _arch == Arch::kRISCV32; }
ASMJIT_INLINE_NODEBUG bool is_arch_riscv32() const noexcept { return _arch == Arch::kRISCV32; }
//! Tests whether this environment describes a 64-bit RISC-V.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isArchRISCV64() const noexcept { return _arch == Arch::kRISCV64; }
ASMJIT_INLINE_NODEBUG bool is_arch_riscv64() const noexcept { return _arch == Arch::kRISCV64; }
//! Tests whether the architecture is 32-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is32Bit() const noexcept { return is32Bit(_arch); }
ASMJIT_INLINE_NODEBUG bool is_32bit() const noexcept { return is_32bit(_arch); }
//! Tests whether the architecture is 64-bit.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool is64Bit() const noexcept { return is64Bit(_arch); }
ASMJIT_INLINE_NODEBUG bool is_64bit() const noexcept { return is_64bit(_arch); }
//! Tests whether the architecture is little endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isLittleEndian() const noexcept { return isLittleEndian(_arch); }
ASMJIT_INLINE_NODEBUG bool is_little_endian() const noexcept { return is_little_endian(_arch); }
//! Tests whether the architecture is big endian.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isBigEndian() const noexcept { return isBigEndian(_arch); }
ASMJIT_INLINE_NODEBUG bool is_big_endian() const noexcept { return is_big_endian(_arch); }
//! Tests whether this architecture is of X86 family.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyX86() const noexcept { return isFamilyX86(_arch); }
ASMJIT_INLINE_NODEBUG bool is_family_x86() const noexcept { return is_family_x86(_arch); }
//! Tests whether this architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyARM() const noexcept { return isFamilyARM(_arch); }
ASMJIT_INLINE_NODEBUG bool is_family_arm() const noexcept { return is_family_arm(_arch); }
//! Tests whether this architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch32() const noexcept { return isFamilyAArch32(_arch); }
ASMJIT_INLINE_NODEBUG bool is_family_aarch32() const noexcept { return is_family_aarch32(_arch); }
//! Tests whether this architecture family is AArch64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyAArch64() const noexcept { return isFamilyAArch64(_arch); }
ASMJIT_INLINE_NODEBUG bool is_family_aarch64() const noexcept { return is_family_aarch64(_arch); }
//! Tests whether this architecture family is MIPS or MIPS64.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyMIPS() const noexcept { return isFamilyMIPS(_arch); }
ASMJIT_INLINE_NODEBUG bool is_family_mips() const noexcept { return is_family_mips(_arch); }
//! Tests whether this architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isFamilyRISCV() const noexcept { return isFamilyRISCV(_arch); }
ASMJIT_INLINE_NODEBUG bool is_family_riscv() const noexcept { return is_family_riscv(_arch); }
//! Tests whether the environment platform is Windows.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformWindows() const noexcept { return _platform == Platform::kWindows; }
ASMJIT_INLINE_NODEBUG bool is_platform_windows() const noexcept { return _platform == Platform::kWindows; }
//! Tests whether the environment platform is Linux.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformLinux() const noexcept { return _platform == Platform::kLinux; }
ASMJIT_INLINE_NODEBUG bool is_platform_linux() const noexcept { return _platform == Platform::kLinux; }
//! Tests whether the environment platform is Hurd.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHurd() const noexcept { return _platform == Platform::kHurd; }
ASMJIT_INLINE_NODEBUG bool is_platform_hurd() const noexcept { return _platform == Platform::kHurd; }
//! Tests whether the environment platform is Haiku.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformHaiku() const noexcept { return _platform == Platform::kHaiku; }
ASMJIT_INLINE_NODEBUG bool is_platform_haiku() const noexcept { return _platform == Platform::kHaiku; }
//! Tests whether the environment platform is any BSD.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformBSD() const noexcept {
ASMJIT_INLINE_NODEBUG bool is_platform_bsd() const noexcept {
return _platform == Platform::kFreeBSD ||
_platform == Platform::kOpenBSD ||
_platform == Platform::kNetBSD ||
@@ -455,7 +455,7 @@ public:
//! Tests whether the environment platform is any Apple platform (OSX, iOS, TVOS, WatchOS).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isPlatformApple() const noexcept {
ASMJIT_INLINE_NODEBUG bool is_platform_apple() const noexcept {
return _platform == Platform::kOSX ||
_platform == Platform::kIOS ||
_platform == Platform::kTVOS ||
@@ -464,39 +464,39 @@ public:
//! Tests whether the ABI is MSVC.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMSVC() const noexcept { return _platformABI == PlatformABI::kMSVC; }
ASMJIT_INLINE_NODEBUG bool is_msvc_abi() const noexcept { return _platform_abi == PlatformABI::kMSVC; }
//! Tests whether the ABI is GNU.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isGNU() const noexcept { return _platformABI == PlatformABI::kGNU; }
ASMJIT_INLINE_NODEBUG bool is_gnu_abi() const noexcept { return _platform_abi == PlatformABI::kGNU; }
//! Tests whether the ABI is Darwin.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isDarwin() const noexcept { return _platformABI == PlatformABI::kDarwin; }
ASMJIT_INLINE_NODEBUG bool is_darwin_abi() const noexcept { return _platform_abi == PlatformABI::kDarwin; }
//! Returns a calculated stack alignment for this environment.
[[nodiscard]]
ASMJIT_API uint32_t stackAlignment() const noexcept;
ASMJIT_API uint32_t stack_alignment() const noexcept;
//! Returns a native register size of this architecture.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t registerSize() const noexcept { return registerSizeFromArch(_arch); }
ASMJIT_INLINE_NODEBUG uint32_t register_size() const noexcept { return reg_size_of_arch(_arch); }
//! Sets the architecture to `arch`.
ASMJIT_INLINE_NODEBUG void setArch(Arch arch) noexcept { _arch = arch; }
//! Sets the sub-architecture to `subArch`.
ASMJIT_INLINE_NODEBUG void setSubArch(SubArch subArch) noexcept { _subArch = subArch; }
ASMJIT_INLINE_NODEBUG void set_arch(Arch arch) noexcept { _arch = arch; }
//! Sets the sub-architecture to `sub_arch`.
ASMJIT_INLINE_NODEBUG void set_sub_arch(SubArch sub_arch) noexcept { _sub_arch = sub_arch; }
//! Sets the vendor to `vendor`.
ASMJIT_INLINE_NODEBUG void setVendor(Vendor vendor) noexcept { _vendor = vendor; }
ASMJIT_INLINE_NODEBUG void set_vendor(Vendor vendor) noexcept { _vendor = vendor; }
//! Sets the platform to `platform`.
ASMJIT_INLINE_NODEBUG void setPlatform(Platform platform) noexcept { _platform = platform; }
//! Sets the ABI to `platformABI`.
ASMJIT_INLINE_NODEBUG void setPlatformABI(PlatformABI platformABI) noexcept { _platformABI = platformABI; }
//! Sets the object format to `objectFormat`.
ASMJIT_INLINE_NODEBUG void setObjectFormat(ObjectFormat objectFormat) noexcept { _objectFormat = objectFormat; }
ASMJIT_INLINE_NODEBUG void set_platform(Platform platform) noexcept { _platform = platform; }
//! Sets the ABI to `platform_abi`.
ASMJIT_INLINE_NODEBUG void set_platform_abi(PlatformABI platform_abi) noexcept { _platform_abi = platform_abi; }
//! Sets the object format to `object_format`.
ASMJIT_INLINE_NODEBUG void set_object_format(ObjectFormat object_format) noexcept { _object_format = object_format; }
//! Sets floating point ABI to `floatABI`.
ASMJIT_INLINE_NODEBUG void setFloatABI(FloatABI floatABI) noexcept { _floatABI = floatABI; }
//! Sets floating point ABI to `float_abi`.
ASMJIT_INLINE_NODEBUG void set_float_abi(FloatABI float_abi) noexcept { _float_abi = float_abi; }
//! \}
@@ -504,109 +504,109 @@ public:
//! \{
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isDefinedArch(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_defined_arch(Arch arch) noexcept {
return uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isValidArch(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_valid_arch(Arch arch) noexcept {
return arch != Arch::kUnknown && uint32_t(arch) <= uint32_t(Arch::kMaxValue);
}
//! Tests whether the given architecture `arch` is 32-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is32Bit(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_32bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == uint32_t(Arch::k32BitMask);
}
//! Tests whether the given architecture `arch` is 64-bit.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool is64Bit(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_64bit(Arch arch) noexcept {
return (uint32_t(arch) & uint32_t(Arch::k32BitMask)) == 0;
}
//! Tests whether the given architecture `arch` is little endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isLittleEndian(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_little_endian(Arch arch) noexcept {
return uint32_t(arch) < uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture `arch` is big endian.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isBigEndian(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_big_endian(Arch arch) noexcept {
return uint32_t(arch) >= uint32_t(Arch::kBigEndian);
}
//! Tests whether the given architecture is Thumb or Thumb_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchThumb(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_arch_thumb(Arch arch) noexcept {
return arch == Arch::kThumb || arch == Arch::kThumb_BE;
}
//! Tests whether the given architecture is ARM or ARM_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchARM(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_arch_arm(Arch arch) noexcept {
return arch == Arch::kARM || arch == Arch::kARM_BE;
}
//! Tests whether the given architecture is AArch64 or AArch64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchAArch64(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_arch_aarch64(Arch arch) noexcept {
return arch == Arch::kAArch64 || arch == Arch::kAArch64_BE;
}
//! Tests whether the given architecture is MIPS32_LE or MIPS32_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS32(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_arch_mips32(Arch arch) noexcept {
return arch == Arch::kMIPS32_LE || arch == Arch::kMIPS32_BE;
}
//! Tests whether the given architecture is MIPS64_LE or MIPS64_BE.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isArchMIPS64(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_arch_mips64(Arch arch) noexcept {
return arch == Arch::kMIPS64_LE || arch == Arch::kMIPS64_BE;
}
//! Tests whether the given architecture family is X86 or X64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyX86(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_family_x86(Arch arch) noexcept {
return arch == Arch::kX86 || arch == Arch::kX64;
}
//! Tests whether the given architecture family is AArch32 (ARM or THUMB).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch32(Arch arch) noexcept {
return isArchARM(arch) || isArchThumb(arch);
static ASMJIT_INLINE_NODEBUG bool is_family_aarch32(Arch arch) noexcept {
return is_arch_arm(arch) || is_arch_thumb(arch);
}
//! Tests whether the given architecture family is AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyAArch64(Arch arch) noexcept {
return isArchAArch64(arch);
static ASMJIT_INLINE_NODEBUG bool is_family_aarch64(Arch arch) noexcept {
return is_arch_aarch64(arch);
}
//! Tests whether the given architecture family is ARM, THUMB, or AArch64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyARM(Arch arch) noexcept {
return isFamilyAArch32(arch) || isFamilyAArch64(arch);
static ASMJIT_INLINE_NODEBUG bool is_family_arm(Arch arch) noexcept {
return is_family_aarch32(arch) || is_family_aarch64(arch);
}
//! Tests whether the given architecture family is MIPS or MIPS64.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyMIPS(Arch arch) noexcept {
return isArchMIPS32(arch) || isArchMIPS64(arch);
static ASMJIT_INLINE_NODEBUG bool is_family_mips(Arch arch) noexcept {
return is_arch_mips32(arch) || is_arch_mips64(arch);
}
//! Tests whether the given architecture family is RISC-V (both 32-bit and 64-bit).
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG bool isFamilyRISCV(Arch arch) noexcept {
static ASMJIT_INLINE_NODEBUG bool is_family_riscv(Arch arch) noexcept {
return arch == Arch::kRISCV32 || arch == Arch::kRISCV64;
}
//! Returns a native general purpose register size from the given architecture.
[[nodiscard]]
static ASMJIT_INLINE_NODEBUG uint32_t registerSizeFromArch(Arch arch) noexcept {
return is32Bit(arch) ? 4u : 8u;
static ASMJIT_INLINE_NODEBUG uint32_t reg_size_of_arch(Arch arch) noexcept {
return is_32bit(arch) ? 4u : 8u;
}
//! \}

View File

@@ -5,14 +5,15 @@
#include "../core/api-build_p.h"
#include "../core/errorhandler.h"
#include "../core/support.h"
ASMJIT_BEGIN_NAMESPACE
ErrorHandler::ErrorHandler() noexcept {}
ErrorHandler::~ErrorHandler() noexcept {}
void ErrorHandler::handleError(Error err, const char* message, BaseEmitter* origin) {
DebugUtils::unused(err, message, origin);
void ErrorHandler::handle_error(Error err, const char* message, BaseEmitter* origin) {
Support::maybe_unused(err, message, origin);
}
ASMJIT_END_NAMESPACE

View File

@@ -17,7 +17,7 @@ class BaseEmitter;
//! Error handler can be used to override the default behavior of error handling.
//!
//! It's available to all classes that inherit `BaseEmitter`. Override \ref ErrorHandler::handleError() to implement
//! It's available to all classes that inherit `BaseEmitter`. Override \ref ErrorHandler::handle_error() to implement
//! your own error handler.
//!
//! The following use-cases are supported:
@@ -27,11 +27,11 @@ class BaseEmitter;
//! - Throw an exception. AsmJit doesn't use exceptions and is completely exception-safe, but it's perfectly legal
//! to throw an exception from the error handler.
//! - Use plain old C's `setjmp()` and `longjmp()`. AsmJit always puts Assembler, Builder and Compiler into
//! a consistent state before calling \ref handleError(), so `longjmp()` can be used without issues to cancel the
//! a consistent state before calling \ref handle_error(), so `longjmp()` can be used without issues to cancel the
//! code generation if an error occurred. This method can be used if exception handling in your project is turned
//! off and you still want some comfort. In most cases it should be safe as AsmJit uses \ref Zone memory and the
//! ownership of memory it allocates always ends with the instance that allocated it. If using this approach please
//! never jump outside the life-time of \ref CodeHolder and \ref BaseEmitter.
//!   off and you still want some comfort. In most cases it should be safe as AsmJit uses an \ref Arena allocator and
//!   the ownership of memory it allocates always ends with the instance that allocated it. If using this
//!   approach, please never jump outside the lifetime of \ref CodeHolder and \ref BaseEmitter.
//!
//! \ref ErrorHandler can be attached to \ref CodeHolder or \ref BaseEmitter, which has a priority. The example below
//! uses error handler that just prints the error, but lets AsmJit continue:
@@ -48,9 +48,9 @@ class BaseEmitter;
//! public:
//! Error err;
//!
//! inline SimpleErrorHandler() : err(kErrorOk) {}
//! inline SimpleErrorHandler() : err(Error::kOk) {}
//!
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! void handle_error(Error err, const char* message, BaseEmitter* origin) override {
//! this->err = err;
//! fprintf(stderr, "ERROR: %s\n", message);
//! }
@@ -61,8 +61,8 @@ class BaseEmitter;
//! SimpleErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//! code.init(rt.environment(), rt.cpu_features());
//! code.set_error_handler(&eh);
//!
//! // Try to emit instruction that doesn't exist.
//! x86::Assembler a(&code);
@@ -107,7 +107,7 @@ class BaseEmitter;
//! class ThrowableErrorHandler : public ErrorHandler {
//! public:
//! // Throw is possible, functions that use ErrorHandler are never 'noexcept'.
//! void handleError(Error err, const char* message, BaseEmitter* origin) override {
//! void handle_error(Error err, const char* message, BaseEmitter* origin) override {
//! throw AsmJitException(err, message);
//! }
//! };
@@ -117,8 +117,8 @@ class BaseEmitter;
//! ThrowableErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//! code.init(rt.environment(), rt.cpu_features());
//! code.set_error_handler(&eh);
//!
//! x86::Assembler a(&code);
//!
@@ -148,9 +148,9 @@ class BaseEmitter;
//!
//! class LongJmpErrorHandler : public asmjit::ErrorHandler {
//! public:
//! inline LongJmpErrorHandler() : err(asmjit::kErrorOk) {}
//! inline LongJmpErrorHandler() : err(asmjit::Error::kOk) {}
//!
//! void handleError(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
//! void handle_error(asmjit::Error err, const char* message, asmjit::BaseEmitter* origin) override {
//! this->err = err;
//! longjmp(state, 1);
//! }
@@ -166,8 +166,8 @@ class BaseEmitter;
//! LongJmpErrorHandler eh;
//!
//! CodeHolder code;
//! code.init(rt.environment(), rt.cpuFeatures());
//! code.setErrorHandler(&eh);
//! code.init(rt.environment(), rt.cpu_features());
//! code.set_error_handler(&eh);
//!
//! x86::Assembler a(&code);
//!
@@ -177,7 +177,7 @@ class BaseEmitter;
//! }
//! else {
//! Error err = eh.err;
//! printf("ASMJIT ERROR: 0x%08X [%s]\n", err, DebugUtils::errorAsString(err));
//! printf("ASMJIT ERROR: 0x%08X [%s]\n", err, DebugUtils::error_as_string(err));
//! }
//!
//! return 0;
@@ -212,10 +212,10 @@ public:
//! exception from your error handler if this way is the preferred way of handling errors in your project.
//!
//! 3. Using plain old C's `setjmp()` and `longjmp()`. AsmJit always puts `BaseEmitter` into a consistent state before
//! calling `handleError()` so `longjmp()` can be used without any issues to cancel the code generation if an
//! calling `handle_error()` so `longjmp()` can be used without any issues to cancel the code generation if an
//! error occurred. There is no difference between exceptions and `longjmp()` from AsmJit's perspective, however,
//! never jump outside of `CodeHolder` and `BaseEmitter` scope as you would leak memory.
ASMJIT_API virtual void handleError(Error err, const char* message, BaseEmitter* origin);
ASMJIT_API virtual void handle_error(Error err, const char* message, BaseEmitter* origin);
//! \}
};

View File

@@ -18,13 +18,13 @@ enum class OffsetType : uint8_t {
// Common Offset Formats
// ---------------------
//! A value having `_immBitCount` bits and shifted by `_immBitShift`.
//! A value having `_imm_bit_count` bits and shifted by `_imm_bit_shift`.
//!
//! This offset type is sufficient for many targets that store the offset as a contiguous set of bits within an
//! instruction word / sequence of bytes.
kSignedOffset,
//! An unsigned value having `_immBitCount` bits and shifted by `_immBitShift`.
//! An unsigned value having `_imm_bit_count` bits and shifted by `_imm_bit_shift`.
kUnsignedOffset,
// AArch64 Specific Offset Formats
@@ -141,20 +141,20 @@ struct OffsetFormat {
//! Encoding flags.
uint8_t _flags;
//! Size of the region (in bytes) containing the offset value, if the offset value is part of an instruction,
//! otherwise it would be the same as `_valueSize`.
uint8_t _regionSize;
//! otherwise it would be the same as `_value_size`.
uint8_t _region_size;
//! Size of the offset value, in bytes (1, 2, 4, or 8).
uint8_t _valueSize;
uint8_t _value_size;
//! Offset of the offset value, in bytes, relative to the start of the region or data. Value offset would be
//! zero if both region size and value size are equal.
uint8_t _valueOffset;
uint8_t _value_offset;
//! Size of the offset immediate value in bits.
uint8_t _immBitCount;
uint8_t _imm_bit_count;
//! Shift of the offset immediate value in bits in the target word.
uint8_t _immBitShift;
uint8_t _imm_bit_shift;
//! Number of least significant bits to discard before writing the immediate to the destination. All discarded
//! bits must be zero otherwise the value is invalid.
uint8_t _immDiscardLsb;
uint8_t _imm_discard_lsb;
//! \}
@@ -169,7 +169,7 @@ struct OffsetFormat {
//!
//! If true, the offset itself is always positive and a separate U/N field is used to indicate the sign of the offset
//! (usually `U==1` means ADD, but sometimes `N==1` means negative offset, which implies SUB).
ASMJIT_INLINE_NODEBUG bool hasSignBit() const noexcept {
ASMJIT_INLINE_NODEBUG bool has_sign_bit() const noexcept {
return _type == OffsetType::kThumb32_ADR ||
_type == OffsetType::kAArch32_ADR ||
_type == OffsetType::kAArch32_U23_SignedOffset ||
@@ -182,70 +182,70 @@ struct OffsetFormat {
//! Returns the size of the region/instruction where the offset is encoded.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t regionSize() const noexcept { return _regionSize; }
ASMJIT_INLINE_NODEBUG uint32_t region_size() const noexcept { return _region_size; }
//! Returns the offset of the word relative to the start of the region where the offset is.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueOffset() const noexcept { return _valueOffset; }
ASMJIT_INLINE_NODEBUG uint32_t value_offset() const noexcept { return _value_offset; }
//! Returns the size of the data-type (word) that contains the offset, in bytes.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t valueSize() const noexcept { return _valueSize; }
ASMJIT_INLINE_NODEBUG uint32_t value_size() const noexcept { return _value_size; }
//! Returns the count of bits of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitCount() const noexcept { return _immBitCount; }
ASMJIT_INLINE_NODEBUG uint32_t imm_bit_count() const noexcept { return _imm_bit_count; }
//! Returns the bit-shift of the offset value in the data it's stored in.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immBitShift() const noexcept { return _immBitShift; }
ASMJIT_INLINE_NODEBUG uint32_t imm_bit_shift() const noexcept { return _imm_bit_shift; }
//! Returns the number of least significant bits of the offset value, that must be zero and that are not part of
//! the encoded data.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t immDiscardLsb() const noexcept { return _immDiscardLsb; }
ASMJIT_INLINE_NODEBUG uint32_t imm_discard_lsb() const noexcept { return _imm_discard_lsb; }
//! Resets this offset format to a simple data value of `dataSize` bytes.
//! Resets this offset format to a simple data value of `data_size` bytes.
//!
//! The region will be the same size as data and immediate bits would correspond to `dataSize * 8`. There will be
//! The region will be the same size as the data, and the immediate bit count will be `data_size * 8`. There will be
//! no immediate bit shift or discarded bits.
inline void resetToSimpleValue(OffsetType type, size_t valueSize) noexcept {
ASMJIT_ASSERT(valueSize <= 8u);
inline void reset_to_simple_value(OffsetType type, size_t value_size) noexcept {
ASMJIT_ASSERT(value_size <= 8u);
_type = type;
_flags = uint8_t(0);
_regionSize = uint8_t(valueSize);
_valueSize = uint8_t(valueSize);
_valueOffset = uint8_t(0);
_immBitCount = uint8_t(valueSize * 8u);
_immBitShift = uint8_t(0);
_immDiscardLsb = uint8_t(0);
_region_size = uint8_t(value_size);
_value_size = uint8_t(value_size);
_value_offset = uint8_t(0);
_imm_bit_count = uint8_t(value_size * 8u);
_imm_bit_shift = uint8_t(0);
_imm_discard_lsb = uint8_t(0);
}
inline void resetToImmValue(OffsetType type, size_t valueSize, uint32_t immBitShift, uint32_t immBitCount, uint32_t immDiscardLsb) noexcept {
ASMJIT_ASSERT(valueSize <= 8u);
ASMJIT_ASSERT(immBitShift < valueSize * 8u);
ASMJIT_ASSERT(immBitCount <= 64u);
ASMJIT_ASSERT(immDiscardLsb <= 64u);
inline void reset_to_imm_value(OffsetType type, size_t value_size, uint32_t imm_bit_shift, uint32_t imm_bit_count, uint32_t imm_discard_lsb) noexcept {
ASMJIT_ASSERT(value_size <= 8u);
ASMJIT_ASSERT(imm_bit_shift < value_size * 8u);
ASMJIT_ASSERT(imm_bit_count <= 64u);
ASMJIT_ASSERT(imm_discard_lsb <= 64u);
_type = type;
_flags = uint8_t(0);
_regionSize = uint8_t(valueSize);
_valueSize = uint8_t(valueSize);
_valueOffset = uint8_t(0);
_immBitCount = uint8_t(immBitCount);
_immBitShift = uint8_t(immBitShift);
_immDiscardLsb = uint8_t(immDiscardLsb);
_region_size = uint8_t(value_size);
_value_size = uint8_t(value_size);
_value_offset = uint8_t(0);
_imm_bit_count = uint8_t(imm_bit_count);
_imm_bit_shift = uint8_t(imm_bit_shift);
_imm_discard_lsb = uint8_t(imm_discard_lsb);
}
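// A sketch of describing a branch-like offset with reset_to_imm_value(): a
// signed 26-bit field at bit 0 of a 4-byte instruction word whose two least
// significant bits are implicit zeros (the concrete values are illustrative):
//
//   OffsetFormat of;
//   of.reset_to_imm_value(OffsetType::kSignedOffset, 4, 0, 26, 2);
//
//   // of.region_size() == 4, of.imm_bit_count() == 26,
//   // of.imm_bit_shift() == 0, of.imm_discard_lsb() == 2.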
inline void setRegion(size_t regionSize, size_t valueOffset) noexcept {
_regionSize = uint8_t(regionSize);
_valueOffset = uint8_t(valueOffset);
inline void set_region(size_t region_size, size_t value_offset) noexcept {
_region_size = uint8_t(region_size);
_value_offset = uint8_t(value_offset);
}
inline void setLeadingAndTrailingSize(size_t leadingSize, size_t trailingSize) noexcept {
_regionSize = uint8_t(leadingSize + trailingSize + _valueSize);
_valueOffset = uint8_t(leadingSize);
inline void set_leading_and_trailing_size(size_t leading_size, size_t trailing_size) noexcept {
_region_size = uint8_t(leading_size + trailing_size + _value_size);
_value_offset = uint8_t(leading_size);
}
//! \}
@@ -260,13 +260,13 @@ struct Fixup {
//! Next fixup in a single-linked list.
Fixup* next;
//! Section where the fixup comes from.
uint32_t sectionId;
uint32_t section_id;
//! Label id, relocation id, or \ref Globals::kInvalidId.
//!
//! \note A fixup that is used with a LabelEntry always uses a relocation id here; however, when a fixup becomes
//! unresolved and is detached from its LabelEntry, this field becomes a label identifier, as unresolved fixups
//! won't reference a relocation. This is just a space optimization.
uint32_t labelOrRelocId;
uint32_t label_or_reloc_id;
//! Label offset relative to the start of the section where the unresolved link comes from.
size_t offset;
//! Inlined rel8/rel32.

View File

@@ -32,27 +32,27 @@ class VirtReg;
namespace Formatter {
Error formatVirtRegName(String& sb, const VirtReg* vReg) noexcept {
if (vReg->nameSize()) {
return sb.append(vReg->name(), vReg->nameSize());
Error format_virt_reg_name(String& sb, const VirtReg* virt_reg) noexcept {
if (virt_reg->name_size()) {
return sb.append(virt_reg->name(), virt_reg->name_size());
}
else {
return sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(vReg->id())));
return sb.append_format("%%%u", unsigned(Operand::virt_id_to_index(virt_reg->id())));
}
}
Error formatVirtRegNameWithPrefix(String& sb, const char* prefix, size_t prefixSize, const VirtReg* vReg) noexcept {
ASMJIT_PROPAGATE(sb.append(prefix, prefixSize));
Error format_virt_reg_name_with_prefix(String& sb, const char* prefix, size_t prefix_size, const VirtReg* v_reg) noexcept {
ASMJIT_PROPAGATE(sb.append(prefix, prefix_size));
if (vReg->nameSize()) {
return sb.append(vReg->name(), vReg->nameSize());
if (v_reg->name_size()) {
return sb.append(v_reg->name(), v_reg->name_size());
}
else {
return sb.appendFormat("%%%u", unsigned(Operand::virtIdToIndex(vReg->id())));
return sb.append_format("%%%u", unsigned(Operand::virt_id_to_index(v_reg->id())));
}
}
static const char wordNameTable[][8] = {
static const char word_name_table[][8] = {
"db",
"dw",
"dd",
@@ -70,305 +70,296 @@ static const char wordNameTable[][8] = {
};
Error formatTypeId(String& sb, TypeId typeId) noexcept {
if (typeId == TypeId::kVoid) {
Error format_type_id(String& sb, TypeId type_id) noexcept {
if (type_id == TypeId::kVoid) {
return sb.append("void");
}
if (!TypeUtils::isValid(typeId)) {
if (!TypeUtils::is_valid(type_id)) {
return sb.append("unknown");
}
const char* typeName = nullptr;
uint32_t typeSize = TypeUtils::sizeOf(typeId);
TypeId scalarType = TypeUtils::scalarOf(typeId);
const char* type_name = nullptr;
uint32_t type_size = TypeUtils::size_of(type_id);
TypeId scalar_type = TypeUtils::scalar_of(type_id);
switch (scalarType) {
case TypeId::kIntPtr : typeName = "intptr" ; break;
case TypeId::kUIntPtr: typeName = "uintptr"; break;
case TypeId::kInt8 : typeName = "int8" ; break;
case TypeId::kUInt8 : typeName = "uint8" ; break;
case TypeId::kInt16 : typeName = "int16" ; break;
case TypeId::kUInt16 : typeName = "uint16" ; break;
case TypeId::kInt32 : typeName = "int32" ; break;
case TypeId::kUInt32 : typeName = "uint32" ; break;
case TypeId::kInt64 : typeName = "int64" ; break;
case TypeId::kUInt64 : typeName = "uint64" ; break;
case TypeId::kFloat32: typeName = "float32"; break;
case TypeId::kFloat64: typeName = "float64"; break;
case TypeId::kFloat80: typeName = "float80"; break;
case TypeId::kMask8 : typeName = "mask8" ; break;
case TypeId::kMask16 : typeName = "mask16" ; break;
case TypeId::kMask32 : typeName = "mask32" ; break;
case TypeId::kMask64 : typeName = "mask64" ; break;
case TypeId::kMmx32 : typeName = "mmx32" ; break;
case TypeId::kMmx64 : typeName = "mmx64" ; break;
switch (scalar_type) {
case TypeId::kIntPtr : type_name = "intptr" ; break;
case TypeId::kUIntPtr: type_name = "uintptr"; break;
case TypeId::kInt8 : type_name = "int8" ; break;
case TypeId::kUInt8 : type_name = "uint8" ; break;
case TypeId::kInt16 : type_name = "int16" ; break;
case TypeId::kUInt16 : type_name = "uint16" ; break;
case TypeId::kInt32 : type_name = "int32" ; break;
case TypeId::kUInt32 : type_name = "uint32" ; break;
case TypeId::kInt64 : type_name = "int64" ; break;
case TypeId::kUInt64 : type_name = "uint64" ; break;
case TypeId::kFloat32: type_name = "float32"; break;
case TypeId::kFloat64: type_name = "float64"; break;
case TypeId::kFloat80: type_name = "float80"; break;
case TypeId::kMask8 : type_name = "mask8" ; break;
case TypeId::kMask16 : type_name = "mask16" ; break;
case TypeId::kMask32 : type_name = "mask32" ; break;
case TypeId::kMask64 : type_name = "mask64" ; break;
case TypeId::kMmx32 : type_name = "mmx32" ; break;
case TypeId::kMmx64 : type_name = "mmx64" ; break;
default:
typeName = "unknown";
type_name = "unknown";
break;
}
uint32_t baseSize = TypeUtils::sizeOf(scalarType);
if (typeSize > baseSize) {
uint32_t count = typeSize / baseSize;
return sb.appendFormat("%sx%u", typeName, unsigned(count));
uint32_t base_size = TypeUtils::size_of(scalar_type);
if (type_size > base_size) {
uint32_t count = type_size / base_size;
return sb.append_format("%sx%u", type_name, unsigned(count));
}
else {
return sb.append(typeName);
return sb.append(type_name);
}
}
Error formatFeature(
String& sb,
Arch arch,
uint32_t featureId) noexcept {
Error format_feature(String& sb, Arch arch, uint32_t feature_id) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatFeature(sb, featureId);
if (Environment::is_family_x86(arch)) {
return x86::FormatterInternal::format_feature(sb, feature_id);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatFeature(sb, featureId);
if (Environment::is_family_arm(arch)) {
return arm::FormatterInternal::format_feature(sb, feature_id);
}
#endif
return kErrorInvalidArch;
return make_error(Error::kInvalidArch);
}
Error formatLabel(
String& sb,
FormatFlags formatFlags,
const BaseEmitter* emitter,
uint32_t labelId) noexcept {
DebugUtils::unused(formatFlags);
Error format_label(String& sb, FormatFlags format_flags, const BaseEmitter* emitter, uint32_t label_id) noexcept {
Support::maybe_unused(format_flags);
if (emitter && emitter->code()) {
CodeHolder* code = emitter->code();
if (ASMJIT_UNLIKELY(!code->isLabelValid(labelId))) {
return sb.appendFormat("<InvalidLabel:%u>", labelId);
if (ASMJIT_UNLIKELY(!code->is_label_valid(label_id))) {
return sb.append_format("<InvalidLabel:%u>", label_id);
}
const LabelEntry& le = code->labelEntry(labelId);
if (le.hasName()) {
if (le.hasParent()) {
uint32_t parentId = le.parentId();
const LabelEntry& pe = code->labelEntry(parentId);
const LabelEntry& le = code->label_entry_of(label_id);
if (le.has_name()) {
if (le.has_parent()) {
uint32_t parent_id = le.parent_id();
const LabelEntry& pe = code->label_entry_of(parent_id);
if (pe.hasName()) {
if (pe.has_name()) {
ASMJIT_PROPAGATE(sb.append(pe.name()));
}
else {
ASMJIT_PROPAGATE(sb.appendFormat("L%u", parentId));
ASMJIT_PROPAGATE(sb.append_format("L%u", parent_id));
}
ASMJIT_PROPAGATE(sb.append('.'));
}
if (le.labelType() == LabelType::kAnonymous) {
ASMJIT_PROPAGATE(sb.appendFormat("L%u@", labelId));
if (le.label_type() == LabelType::kAnonymous) {
ASMJIT_PROPAGATE(sb.append_format("L%u@", label_id));
}
return sb.append(le.name());
}
}
return sb.appendFormat("L%u", labelId);
return sb.append_format("L%u", label_id);
}
Error formatRegister(
Error format_register(
String& sb,
FormatFlags formatFlags,
FormatFlags format_flags,
const BaseEmitter* emitter,
Arch arch,
RegType regType,
uint32_t regId) noexcept {
RegType reg_type,
uint32_t reg_id) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
if (Environment::is_family_x86(arch)) {
return x86::FormatterInternal::format_register(sb, format_flags, emitter, arch, reg_type, reg_id);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatRegister(sb, formatFlags, emitter, arch, regType, regId);
if (Environment::is_family_arm(arch)) {
return arm::FormatterInternal::format_register(sb, format_flags, emitter, arch, reg_type, reg_id);
}
#endif
return kErrorInvalidArch;
return make_error(Error::kInvalidArch);
}
Error formatOperand(
Error format_operand(
String& sb,
FormatFlags formatFlags,
FormatFlags format_flags,
const BaseEmitter* emitter,
Arch arch,
const Operand_& op) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
if (Environment::is_family_x86(arch)) {
return x86::FormatterInternal::format_operand(sb, format_flags, emitter, arch, op);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyARM(arch)) {
return arm::FormatterInternal::formatOperand(sb, formatFlags, emitter, arch, op);
if (Environment::is_family_arm(arch)) {
return arm::FormatterInternal::format_operand(sb, format_flags, emitter, arch, op);
}
#endif
return kErrorInvalidArch;
return make_error(Error::kInvalidArch);
}
ASMJIT_API Error formatDataType(
ASMJIT_API Error format_data_type(
String& sb,
FormatFlags formatFlags,
FormatFlags format_flags,
Arch arch,
TypeId typeId) noexcept
TypeId type_id) noexcept
{
DebugUtils::unused(formatFlags);
Support::maybe_unused(format_flags);
if (ASMJIT_UNLIKELY(uint32_t(arch) > uint32_t(Arch::kMaxValue))) {
return DebugUtils::errored(kErrorInvalidArch);
return make_error(Error::kInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0 || typeSize > 8) {
return DebugUtils::errored(kErrorInvalidState);
uint32_t type_size = TypeUtils::size_of(type_id);
if (type_size == 0 || type_size > 8) {
return make_error(Error::kInvalidState);
}
uint32_t typeSizeLog2 = Support::ctz(typeSize);
return sb.append(wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))]);
uint32_t type_size_log2 = Support::ctz(type_size);
return sb.append(word_name_table[size_t(ArchTraits::by_arch(arch).type_name_id_by_index(type_size_log2))]);
}
static Error formatDataHelper(String& sb, const char* typeName, uint32_t typeSize, const uint8_t* data, size_t itemCount) noexcept {
static Error format_data_helper(String& sb, const char* type_name, uint32_t type_size, const uint8_t* data, size_t item_count) noexcept {
sb.append('.');
sb.append(typeName);
sb.append(type_name);
sb.append(' ');
for (size_t i = 0; i < itemCount; i++) {
for (size_t i = 0; i < item_count; i++) {
uint64_t v = 0;
if (i != 0) {
ASMJIT_PROPAGATE(sb.append(", ", 2));
}
switch (typeSize) {
switch (type_size) {
case 1: v = data[0]; break;
case 2: v = Support::loadu_u16(data); break;
case 4: v = Support::loadu_u32(data); break;
case 8: v = Support::loadu_u64(data); break;
}
ASMJIT_PROPAGATE(sb.appendUInt(v, 16, typeSize * 2, StringFormatFlags::kAlternate));
data += typeSize;
ASMJIT_PROPAGATE(sb.append_uint(v, 16, type_size * 2, StringFormatFlags::kAlternate));
data += type_size;
}
return kErrorOk;
return Error::kOk;
}
Error formatData(
Error format_data(
String& sb,
FormatFlags formatFlags,
FormatFlags format_flags,
Arch arch,
TypeId typeId, const void* data, size_t itemCount, size_t repeatCount
TypeId type_id, const void* data, size_t item_count, size_t repeat_count
) noexcept {
DebugUtils::unused(formatFlags);
Support::maybe_unused(format_flags);
if (ASMJIT_UNLIKELY(!Environment::isDefinedArch(arch))) {
return DebugUtils::errored(kErrorInvalidArch);
if (ASMJIT_UNLIKELY(!Environment::is_defined_arch(arch))) {
return make_error(Error::kInvalidArch);
}
uint32_t typeSize = TypeUtils::sizeOf(typeId);
if (typeSize == 0) {
return DebugUtils::errored(kErrorInvalidState);
uint32_t type_size = TypeUtils::size_of(type_id);
if (type_size == 0) {
return make_error(Error::kInvalidState);
}
if (!Support::isPowerOf2(typeSize)) {
itemCount *= typeSize;
typeSize = 1;
if (!Support::is_power_of_2(type_size)) {
item_count *= type_size;
type_size = 1;
}
while (typeSize > 8u) {
typeSize >>= 1;
itemCount <<= 1;
while (type_size > 8u) {
type_size >>= 1;
item_count <<= 1;
}
uint32_t typeSizeLog2 = Support::ctz(typeSize);
const char* wordName = wordNameTable[size_t(ArchTraits::byArch(arch).typeNameIdByIndex(typeSizeLog2))];
uint32_t type_size_log2 = Support::ctz(type_size);
const char* word_name = word_name_table[size_t(ArchTraits::by_arch(arch).type_name_id_by_index(type_size_log2))];
if (repeatCount > 1) {
ASMJIT_PROPAGATE(sb.appendFormat(".repeat %zu ", repeatCount));
if (repeat_count > 1) {
ASMJIT_PROPAGATE(sb.append_format(".repeat %zu ", repeat_count));
}
return formatDataHelper(sb, wordName, typeSize, static_cast<const uint8_t*>(data), itemCount);
return format_data_helper(sb, word_name, type_size, static_cast<const uint8_t*>(data), item_count);
}
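// A sketch of the expected output (flag and enumerator names assumed): four
// uint8 items format as a single `.db` directive on X64.
//
//   String sb;
//   uint8_t bytes[] = { 1, 2, 3, 4 };
//   Formatter::format_data(sb, FormatFlags::kNone, Arch::kX64,
//                          TypeId::kUInt8, bytes, 4, 1);
//   // sb == ".db 0x01, 0x02, 0x03, 0x04"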
Error format_instruction(
  String& sb,
  FormatFlags format_flags,
  const BaseEmitter* emitter,
  Arch arch,
  const BaseInst& inst, Span<const Operand_> operands) noexcept {

#if !defined(ASMJIT_NO_X86)
  if (Environment::is_family_x86(arch)) {
    return x86::FormatterInternal::format_instruction(sb, format_flags, emitter, arch, inst, operands);
  }
#endif

#if !defined(ASMJIT_NO_AARCH64)
  if (Environment::is_family_aarch64(arch)) {
    return a64::FormatterInternal::format_instruction(sb, format_flags, emitter, arch, inst, operands);
  }
#endif

  return make_error(Error::kInvalidArch);
}
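And a similar sketch for a single instruction; the Span pointer-plus-size constructor is an assumption here, not confirmed by this diff:

  // Format `add eax, ebx`; no emitter is needed because no virtual registers are used.
  String sb;
  Operand ops[] = { x86::eax, x86::ebx };
  Formatter::format_instruction(sb, FormatFlags::kNone, nullptr, Arch::kX64,
                                BaseInst(x86::Inst::kIdAdd), Span<const Operand_>(ops, 2));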
#ifndef ASMJIT_NO_BUILDER

#ifndef ASMJIT_NO_COMPILER
static Error format_func_value(String& sb, FormatFlags format_flags, const BaseEmitter* emitter, FuncValue value) noexcept {
  TypeId type_id = value.type_id();
  ASMJIT_PROPAGATE(format_type_id(sb, type_id));

  if (value.is_assigned()) {
    ASMJIT_PROPAGATE(sb.append('@'));

    if (value.is_indirect()) {
      ASMJIT_PROPAGATE(sb.append('['));
    }

    // NOTE: It should be either reg or stack, but never both. We use two IFs on purpose so if the FuncValue
    // is both it would show in logs.
    if (value.is_reg()) {
      ASMJIT_PROPAGATE(format_register(sb, format_flags, emitter, emitter->arch(), value.reg_type(), value.reg_id()));
    }

    if (value.is_stack()) {
      ASMJIT_PROPAGATE(sb.append_format("[%d]", int(value.stack_offset())));
    }

    if (value.is_indirect()) {
      ASMJIT_PROPAGATE(sb.append(']'));
    }
  }

  return Error::kOk;
}
static Error format_func_value_pack(
  String& sb,
  FormatFlags format_flags,
  const BaseCompiler* cc,
  const FuncValuePack& pack,
  const RegOnly* virt_regs) noexcept {

  size_t count = pack.count();
  if (!count) {
    return sb.append("void");
  }

  if (count > 1) {
    ASMJIT_PROPAGATE(sb.append('['));
  }

  for (uint32_t value_index = 0; value_index < count; value_index++) {
    const FuncValue& value = pack[value_index];
    if (!value) {
      break;
    }

    if (value_index) {
      ASMJIT_PROPAGATE(sb.append(", "));
    }

    ASMJIT_PROPAGATE(format_func_value(sb, format_flags, cc, value));

    if (virt_regs) {
      const VirtReg* virt_reg = nullptr;
      static const char null_reg[] = "<none>";

      if (virt_regs[value_index].is_reg() && cc->is_virt_id_valid(virt_regs[value_index].id())) {
        virt_reg = cc->virt_reg_by_id(virt_regs[value_index].id());
      }

      ASMJIT_PROPAGATE(sb.append(' '));
      if (virt_reg) {
        ASMJIT_PROPAGATE(Formatter::format_virt_reg_name(sb, virt_reg));
      }
      else {
        ASMJIT_PROPAGATE(sb.append(null_reg, sizeof(null_reg) - 1));
      }
    }
  }

  if (count > 1) {
    ASMJIT_PROPAGATE(sb.append(']'));
  }

  return Error::kOk;
}
static Error format_func_rets(
  String& sb,
  FormatFlags format_flags,
  const BaseCompiler* cc,
  const FuncDetail& fd) noexcept {

  return format_func_value_pack(sb, format_flags, cc, fd.ret_pack(), nullptr);
}

static Error format_func_args(
  String& sb,
  FormatFlags format_flags,
  const BaseCompiler* cc,
  const FuncDetail& fd,
  const FuncNode::ArgPack* arg_packs) noexcept {

  uint32_t arg_count = fd.arg_count();
  if (!arg_count) {
    return sb.append("void");
  }

  for (uint32_t arg_index = 0; arg_index < arg_count; arg_index++) {
    if (arg_index) {
      ASMJIT_PROPAGATE(sb.append(", "));
    }
    ASMJIT_PROPAGATE(format_func_value_pack(sb, format_flags, cc, fd.arg_pack(arg_index), arg_packs[arg_index]._data));
  }

  return Error::kOk;
}
#endif
Error format_node(
  String& sb,
  const FormatOptions& format_options,
  const BaseBuilder* builder,
  const BaseNode* node) noexcept {

  if (node->has_position() && format_options.has_flag(FormatFlags::kPositions)) {
    ASMJIT_PROPAGATE(sb.append_format("<%05u> ", uint32_t(node->position())));
  }

  size_t start_line_index = sb.size();

  switch (node->type()) {
    case NodeType::kInst:
    case NodeType::kJump: {
      const InstNode* inst_node = node->as<InstNode>();
      ASMJIT_PROPAGATE(builder->_funcs.format_instruction(sb, format_options.flags(), builder, builder->arch(), inst_node->baseInst(), inst_node->operands()));
      break;
    }

    case NodeType::kSection: {
      const SectionNode* section_node = node->as<SectionNode>();
      if (builder->_code->is_section_valid(section_node->section_id())) {
        const Section* section = builder->_code->section_by_id(section_node->section_id());
        ASMJIT_PROPAGATE(sb.append_format(".section %s", section->name()));
      }
      break;
    }

    case NodeType::kLabel: {
      const LabelNode* label_node = node->as<LabelNode>();
      ASMJIT_PROPAGATE(format_label(sb, format_options.flags(), builder, label_node->label_id()));
      ASMJIT_PROPAGATE(sb.append(":"));
      break;
    }

    case NodeType::kAlign: {
      const AlignNode* align_node = node->as<AlignNode>();
      ASMJIT_PROPAGATE(sb.append_format(".align %u (%s)",
        align_node->alignment(),
        align_node->align_mode() == AlignMode::kCode ? "code" : "data"));
      break;
    }

    case NodeType::kEmbedData: {
      const EmbedDataNode* embed_node = node->as<EmbedDataNode>();
      ASMJIT_PROPAGATE(sb.append('.'));
      ASMJIT_PROPAGATE(format_data_type(sb, format_options.flags(), builder->arch(), embed_node->type_id()));
      ASMJIT_PROPAGATE(sb.append_format(" {Count=%zu Repeat=%zu TotalSize=%zu}", embed_node->item_count(), embed_node->repeat_count(), embed_node->data_size()));
      break;
    }

    case NodeType::kEmbedLabel: {
      const EmbedLabelNode* embed_node = node->as<EmbedLabelNode>();
      ASMJIT_PROPAGATE(sb.append(".label "));
      ASMJIT_PROPAGATE(format_label(sb, format_options.flags(), builder, embed_node->label_id()));
      break;
    }

    case NodeType::kEmbedLabelDelta: {
      const EmbedLabelDeltaNode* embed_node = node->as<EmbedLabelDeltaNode>();
      ASMJIT_PROPAGATE(sb.append(".label ("));
      ASMJIT_PROPAGATE(format_label(sb, format_options.flags(), builder, embed_node->label_id()));
      ASMJIT_PROPAGATE(sb.append(" - "));
      ASMJIT_PROPAGATE(format_label(sb, format_options.flags(), builder, embed_node->base_label_id()));
      ASMJIT_PROPAGATE(sb.append(")"));
      break;
    }

    case NodeType::kConstPool: {
      const ConstPoolNode* const_pool_node = node->as<ConstPoolNode>();
      ASMJIT_PROPAGATE(sb.append_format("[ConstPool Size=%zu Alignment=%zu]", const_pool_node->size(), const_pool_node->alignment()));
      break;
    }

    case NodeType::kComment: {
      const CommentNode* comment_node = node->as<CommentNode>();
      return sb.append_format("; %s", comment_node->inline_comment());
    }

    case NodeType::kSentinel: {
      const SentinelNode* sentinel_node = node->as<SentinelNode>();
      const char* sentinel_name = nullptr;

      switch (sentinel_node->sentinel_type()) {
        case SentinelType::kFuncEnd:
          sentinel_name = "[FuncEnd]";
          break;

        default:
          sentinel_name = "[Sentinel]";
          break;
      }

      ASMJIT_PROPAGATE(sb.append(sentinel_name));
      break;
    }

#ifndef ASMJIT_NO_COMPILER
    case NodeType::kFunc: {
      const FuncNode* func_node = node->as<FuncNode>();

      if (builder->is_compiler()) {
        ASMJIT_PROPAGATE(format_label(sb, format_options.flags(), builder, func_node->label_id()));
        ASMJIT_PROPAGATE(sb.append(": "));

        ASMJIT_PROPAGATE(format_func_rets(sb, format_options.flags(), static_cast<const BaseCompiler*>(builder), func_node->detail()));
        ASMJIT_PROPAGATE(sb.append(" Func("));
        ASMJIT_PROPAGATE(format_func_args(sb, format_options.flags(), static_cast<const BaseCompiler*>(builder), func_node->detail(), func_node->arg_packs()));
        ASMJIT_PROPAGATE(sb.append(")"));
      }
      break;
    }

    case NodeType::kFuncRet: {
      const FuncRetNode* ret_node = node->as<FuncRetNode>();
      ASMJIT_PROPAGATE(sb.append("[FuncRet]"));

      for (uint32_t i = 0; i < 2; i++) {
        const Operand_& op = ret_node->op(i);
        if (!op.is_none()) {
          ASMJIT_PROPAGATE(sb.append(i == 0 ? " " : ", "));
          ASMJIT_PROPAGATE(format_operand(sb, format_options.flags(), builder, builder->arch(), op));
        }
      }
      break;
    }

    case NodeType::kInvoke: {
      const InvokeNode* invoke_node = node->as<InvokeNode>();
      ASMJIT_PROPAGATE(builder->_funcs.format_instruction(sb, format_options.flags(), builder, builder->arch(), invoke_node->baseInst(), invoke_node->operands()));
      break;
    }
#endif

    default: {
      ASMJIT_PROPAGATE(sb.append_format("[UserNode:%u]", node->type()));
      break;
    }
  }

  if (node->has_inline_comment()) {
    size_t required_padding = padding_from_options(format_options, FormatPaddingGroup::kRegularLine);
    size_t current_padding = sb.size() - start_line_index;

    if (current_padding < required_padding) {
      ASMJIT_PROPAGATE(sb.append_chars(' ', required_padding - current_padding));
    }

    ASMJIT_PROPAGATE(sb.append("; "));
    ASMJIT_PROPAGATE(sb.append(node->inline_comment()));
  }

  return Error::kOk;
}
Error format_node_list(
  String& sb,
  const FormatOptions& format_options,
  const BaseBuilder* builder) noexcept {

  return format_node_list(sb, format_options, builder, builder->first_node(), nullptr);
}

Error format_node_list(
  String& sb,
  const FormatOptions& format_options,
  const BaseBuilder* builder,
  const BaseNode* begin,
  const BaseNode* end) noexcept {

  const BaseNode* node = begin;
  while (node != end) {
    ASMJIT_PROPAGATE(format_node(sb, format_options, builder, node));
    ASMJIT_PROPAGATE(sb.append('\n'));
    node = node->next();
  }

  return Error::kOk;
}
#endif
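Taken together, a short sketch of dumping a Builder's node list; the x86::Builder setup is illustrative:

  CodeHolder code;
  code.init(Environment::host());
  x86::Builder cb(&code);
  cb.mov(x86::eax, 1);
  cb.ret();

  String sb;
  FormatOptions options;
  options.add_flags(FormatFlags::kPositions); // prefix each node with its <position>.
  Formatter::format_node_list(sb, options, &cb);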
View File
@@ -8,6 +8,7 @@
#include "../core/globals.h"
#include "../core/inst.h"
#include "../core/span.h"
#include "../core/string.h"
#include "../core/support.h"
@@ -113,37 +114,37 @@ public:
  //! Tests whether the given `flag` is set in format flags.
  [[nodiscard]]
  ASMJIT_INLINE_NODEBUG bool has_flag(FormatFlags flag) const noexcept { return Support::test(_flags, flag); }

  //! Resets all format flags to `flags`.
  ASMJIT_INLINE_NODEBUG void set_flags(FormatFlags flags) noexcept { _flags = flags; }
  //! Adds `flags` to format flags.
  ASMJIT_INLINE_NODEBUG void add_flags(FormatFlags flags) noexcept { _flags |= flags; }
  //! Removes `flags` from format flags.
  ASMJIT_INLINE_NODEBUG void clear_flags(FormatFlags flags) noexcept { _flags &= ~flags; }

  //! Returns indentation for the given indentation `group`.
  [[nodiscard]]
  ASMJIT_INLINE_NODEBUG uint8_t indentation(FormatIndentationGroup group) const noexcept { return _indentation[group]; }
  //! Sets indentation for the given indentation `group`.
  ASMJIT_INLINE_NODEBUG void set_indentation(FormatIndentationGroup group, uint32_t n) noexcept { _indentation[group] = uint8_t(n); }
  //! Resets indentation for the given indentation `group` to zero.
  ASMJIT_INLINE_NODEBUG void reset_indentation(FormatIndentationGroup group) noexcept { _indentation[group] = uint8_t(0); }

  //! Returns padding for the given padding `group`.
  [[nodiscard]]
  ASMJIT_INLINE_NODEBUG size_t padding(FormatPaddingGroup group) const noexcept { return _padding[group]; }
  //! Sets padding for the given padding `group`.
  ASMJIT_INLINE_NODEBUG void set_padding(FormatPaddingGroup group, size_t n) noexcept { _padding[group] = uint16_t(n); }
  //! Resets padding for the given padding `group` to zero, which means that a default padding will be used
  //! based on the target architecture properties.
  ASMJIT_INLINE_NODEBUG void reset_padding(FormatPaddingGroup group) noexcept { _padding[group] = uint16_t(0); }

  //! \}
};
@@ -153,102 +154,102 @@ namespace Formatter {
#ifndef ASMJIT_NO_LOGGING
//! Appends a formatted `type_id` to the output string `sb`.
ASMJIT_API Error format_type_id(
  String& sb,
  TypeId type_id) noexcept;

//! Appends a formatted `feature_id` to the output string `sb`.
//!
//! See \ref CpuFeatures.
ASMJIT_API Error format_feature(
  String& sb,
  Arch arch,
  uint32_t feature_id) noexcept;

//! Appends a formatted register to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format virtual registers, which won't be formatted properly
//! if the `emitter` is not provided.
ASMJIT_API Error format_register(
  String& sb,
  FormatFlags format_flags,
  const BaseEmitter* emitter,
  Arch arch,
  RegType reg_type,
  uint32_t reg_id) noexcept;

//! Appends a formatted label to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels properly; otherwise the label is
//! formatted as if it were an anonymous label.
ASMJIT_API Error format_label(
  String& sb,
  FormatFlags format_flags,
  const BaseEmitter* emitter,
  uint32_t label_id) noexcept;

//! Appends a formatted operand to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref format_register() and \ref format_label() for more details.
ASMJIT_API Error format_operand(
  String& sb,
  FormatFlags format_flags,
  const BaseEmitter* emitter,
  Arch arch,
  const Operand_& op) noexcept;

//! Appends a formatted data-type to the output string `sb`.
ASMJIT_API Error format_data_type(
  String& sb,
  FormatFlags format_flags,
  Arch arch,
  TypeId type_id) noexcept;

//! Appends formatted data to the output string `sb`.
ASMJIT_API Error format_data(
  String& sb,
  FormatFlags format_flags,
  Arch arch,
  TypeId type_id, const void* data, size_t item_count, size_t repeat_count = 1) noexcept;

//! Appends a formatted instruction to the output string `sb`.
//!
//! \note Emitter is optional, but it's required to format named labels and virtual registers. See
//! \ref format_register() and \ref format_label() for more details.
ASMJIT_API Error format_instruction(
  String& sb,
  FormatFlags format_flags,
  const BaseEmitter* emitter,
  Arch arch,
  const BaseInst& inst, Span<const Operand_> operands) noexcept;

#ifndef ASMJIT_NO_BUILDER
//! Appends a formatted node to the output string `sb`.
//!
//! The `node` must belong to the provided `builder`.
ASMJIT_API Error format_node(
  String& sb,
  const FormatOptions& format_options,
  const BaseBuilder* builder,
  const BaseNode* node) noexcept;

//! Appends formatted nodes to the output string `sb`.
//!
//! All nodes that are part of the given `builder` will be appended.
ASMJIT_API Error format_node_list(
  String& sb,
  const FormatOptions& format_options,
  const BaseBuilder* builder) noexcept;

//! Appends formatted nodes to the output string `sb`.
//!
//! This function works the same as \ref format_node(), but appends more nodes to the output string,
//! separating each node with a newline '\n' character.
ASMJIT_API Error format_node_list(
  String& sb,
  const FormatOptions& format_options,
  const BaseBuilder* builder,
  const BaseNode* begin,
  const BaseNode* end) noexcept;
View File
@@ -19,16 +19,16 @@ ASMJIT_BEGIN_NAMESPACE
namespace Formatter {
[[maybe_unused]]
static ASMJIT_INLINE size_t padding_from_options(const FormatOptions& format_options, FormatPaddingGroup group) noexcept {
  static constexpr uint16_t default_padding_table[uint32_t(FormatPaddingGroup::kMaxValue) + 1] = { 44, 26 };
  static_assert(uint32_t(FormatPaddingGroup::kMaxValue) + 1 == 2, "If a new group is defined it must be added here");

  size_t padding = format_options.padding(group);
  return padding ? padding : size_t(default_padding_table[uint32_t(group)]);
}

Error format_virt_reg_name(String& sb, const VirtReg* v_reg) noexcept;
Error format_virt_reg_name_with_prefix(String& sb, const char* prefix, size_t prefix_size, const VirtReg* v_reg) noexcept;
} // {Formatter}
View File
@@ -23,214 +23,214 @@ ASMJIT_BEGIN_NAMESPACE
// CallConv - Initialization & Reset
// =================================

ASMJIT_FAVOR_SIZE Error CallConv::init(CallConvId call_conv_id, const Environment& environment) noexcept {
  reset();

#if !defined(ASMJIT_NO_X86)
  if (environment.is_family_x86()) {
    return x86::FuncInternal::init_call_conv(*this, call_conv_id, environment);
  }
#endif

#if !defined(ASMJIT_NO_AARCH64)
  if (environment.is_family_aarch64()) {
    return a64::FuncInternal::init_call_conv(*this, call_conv_id, environment);
  }
#endif

  return make_error(Error::kInvalidArgument);
}
// FuncDetail - Init / Reset
// =========================

ASMJIT_FAVOR_SIZE Error FuncDetail::init(const FuncSignature& signature, const Environment& environment) noexcept {
  CallConvId call_conv_id = signature.call_conv_id();
  uint32_t arg_count = signature.arg_count();

  if (ASMJIT_UNLIKELY(arg_count > Globals::kMaxFuncArgs)) {
    return make_error(Error::kInvalidArgument);
  }

  CallConv& cc = _call_conv;
  ASMJIT_PROPAGATE(cc.init(call_conv_id, environment));

  uint32_t register_size = Environment::reg_size_of_arch(cc.arch());
  uint32_t deabstract_delta = TypeUtils::deabstract_delta_of_size(register_size);

  const TypeId* signature_args = signature.args();
  for (uint32_t arg_index = 0; arg_index < arg_count; arg_index++) {
    FuncValuePack& arg_pack = _args[arg_index];
    arg_pack[0].init_type_id(TypeUtils::deabstract(signature_args[arg_index], deabstract_delta));
  }

  _arg_count = uint8_t(arg_count);
  _va_index = uint8_t(signature.va_index());

  TypeId ret = signature.ret();
  if (ret != TypeId::kVoid) {
    _rets[0].init_type_id(TypeUtils::deabstract(ret, deabstract_delta));
  }

#if !defined(ASMJIT_NO_X86)
  if (environment.is_family_x86()) {
    return x86::FuncInternal::init_func_detail(*this, signature, register_size);
  }
#endif

#if !defined(ASMJIT_NO_AARCH64)
  if (environment.is_family_aarch64()) {
    return a64::FuncInternal::init_func_detail(*this, signature);
  }
#endif

  // We should never reach this point - if `cc.init()` succeeded then there has to be an implementation for
  // the current architecture. However, stay safe.
  return make_error(Error::kInvalidArgument);
}
// FuncFrame - Init
// ================

ASMJIT_FAVOR_SIZE Error FuncFrame::init(const FuncDetail& func) noexcept {
  Arch arch = func.call_conv().arch();
  if (!Environment::is_valid_arch(arch)) {
    return make_error(Error::kInvalidArch);
  }

  const ArchTraits& arch_traits = ArchTraits::by_arch(arch);

  // Initializing FuncFrame means making a copy of some properties of `func`. Properties like `_local_stack_size`
  // will be set by the user before the frame is finalized.
  reset();

  _arch = arch;
  _sp_reg_id = uint8_t(arch_traits.sp_reg_id());
  _sa_reg_id = uint8_t(Reg::kIdBad);

  uint32_t natural_stack_alignment = func.call_conv().natural_stack_alignment();
  uint32_t min_dynamic_alignment = Support::max<uint32_t>(natural_stack_alignment, 16);

  if (min_dynamic_alignment == natural_stack_alignment) {
    min_dynamic_alignment <<= 1;
  }

  _natural_stack_alignment = uint8_t(natural_stack_alignment);
  _min_dynamic_alignment = uint8_t(min_dynamic_alignment);
  _red_zone_size = uint8_t(func.red_zone_size());
  _spill_zone_size = uint8_t(func.spill_zone_size());
  _final_stack_alignment = uint8_t(_natural_stack_alignment);

  if (func.has_flag(CallConvFlags::kCalleePopsStack)) {
    _callee_stack_cleanup = uint16_t(func.arg_stack_size());
  }

  // Initial masks of dirty and preserved registers.
  for (RegGroup group : Support::enumerate(RegGroup::kMaxVirt)) {
    _dirty_regs[group] = func.used_regs(group);
    _preserved_regs[group] = func.preserved_regs(group);
  }

  // Exclude stack pointer - this register is never included in saved GP regs.
  _preserved_regs[RegGroup::kGp] &= ~Support::bit_mask<RegMask>(arch_traits.sp_reg_id());

  // The size and alignment of save/restore area of registers for each virtual register group.
  _save_restore_reg_size = func.call_conv()._save_restore_reg_size;
  _save_restore_alignment = func.call_conv()._save_restore_alignment;

  return Error::kOk;
}
// FuncFrame - Finalize
// ====================

ASMJIT_FAVOR_SIZE Error FuncFrame::finalize() noexcept {
  if (!Environment::is_valid_arch(arch())) {
    return make_error(Error::kInvalidArch);
  }

  const ArchTraits& arch_traits = ArchTraits::by_arch(arch());

  uint32_t register_size = _save_restore_reg_size[RegGroup::kGp];
  uint32_t vector_size = _save_restore_reg_size[RegGroup::kVec];
  uint32_t return_address_size = arch_traits.has_link_reg() ? 0u : register_size;

  // The final stack alignment must be updated accordingly to call and local stack alignments.
  uint32_t stack_alignment = _final_stack_alignment;
  ASMJIT_ASSERT(stack_alignment == Support::max(_natural_stack_alignment, _call_stack_alignment, _local_stack_alignment));

  bool has_fp = has_preserved_fp();
  bool has_da = has_dynamic_alignment();

  uint32_t kSp = arch_traits.sp_reg_id();
  uint32_t kFp = arch_traits.fp_reg_id();
  uint32_t kLr = arch_traits.link_reg_id();

  // Make frame pointer dirty if the function uses it.
  if (has_fp) {
    _dirty_regs[RegGroup::kGp] |= Support::bit_mask<RegMask>(kFp);

    // Currently required by ARM, if this works differently across architectures we would have to generalize most
    // likely in CallConv.
    if (kLr != Reg::kIdBad) {
      _dirty_regs[RegGroup::kGp] |= Support::bit_mask<RegMask>(kLr);
    }
  }

  // These two are identical if the function doesn't align its stack dynamically.
  uint32_t sa_reg_id = _sa_reg_id;
  if (sa_reg_id == Reg::kIdBad) {
    sa_reg_id = kSp;
  }

  // Fix stack arguments base-register from SP to FP in case it was not picked before and the function performs
  // dynamic stack alignment.
  if (has_da && sa_reg_id == kSp) {
    sa_reg_id = kFp;
  }

  // Mark as dirty any register but SP if used as SA pointer.
  if (sa_reg_id != kSp) {
    _dirty_regs[RegGroup::kGp] |= Support::bit_mask<RegMask>(sa_reg_id);
  }

  _sp_reg_id = uint8_t(kSp);
  _sa_reg_id = uint8_t(sa_reg_id);

  // Setup stack size used to save preserved registers.
  uint32_t save_restore_sizes[2] {};
  for (RegGroup group : Support::enumerate(RegGroup::kMaxVirt)) {
    save_restore_sizes[size_t(!arch_traits.has_inst_push_pop(group))]
      += Support::align_up(Support::popcnt(saved_regs(group)) * save_restore_reg_size(group), save_restore_alignment(group));
  }

  _push_pop_save_size = uint16_t(save_restore_sizes[0]);
  _extra_reg_save_size = uint16_t(save_restore_sizes[1]);

  uint32_t v = 0;                            // The beginning of the stack frame relative to SP after prolog.
  v += call_stack_size();                    // Count 'call_stack_size'    <- This is used to call functions.
  v = Support::align_up(v, stack_alignment); // Align to function's stack alignment.

  _local_stack_offset = v;                   // Store 'local_stack_offset' <- Function's local stack starts here.
  v += local_stack_size();                   // Count 'local_stack_size'   <- Function's local stack ends here.

  // If the function's stack must be aligned, calculate the alignment necessary to store vector registers, and set
  // `FuncAttributes::kAlignedVecSR` to inform PEI that it can use instructions that perform aligned stores/loads.
  if (stack_alignment >= vector_size && _extra_reg_save_size) {
    add_attributes(FuncAttributes::kAlignedVecSR);
    v = Support::align_up(v, vector_size);   // Align 'extra_reg_save_offset'.
  }

  _extra_reg_save_offset = v;                // Store 'extra_reg_save_offset' <- Non-GP save/restore starts here.
  v += _extra_reg_save_size;                 // Count 'extra_reg_save_size'   <- Non-GP save/restore ends here.

  // Calculate if dynamic alignment (DA) slot (stored as offset relative to SP) is required and its offset.
  if (has_da && !has_fp) {
    _da_offset = v;                          // Store 'da_offset' <- DA pointer would be stored here.
    v += register_size;                      // Count 'da_offset'.
  }
  else {
    _da_offset = FuncFrame::kTagInvalidOffset;
  }

  // Link Register
  // -------------
  //
  // The stack is aligned after the function call as the return address is stored in a link register. Some
  // architectures may require to always have aligned stack after PUSH/POP operation, which is represented
  // by ArchTraits::hw_stack_alignment().
  //
  // No Link Register (X86/X64)
  // --------------------------
  //
  // The return address should be stored after GP save/restore regs. It has the same size as `register_size`
  // (basically the native register/pointer size). We don't adjust it now as `v` now contains the exact size
  // that the function requires to adjust (call frame + stack frame, vec stack size). The stack (if we consider
  // this size) is misaligned now, as it's always aligned before the function call - when `call()` is executed
  // it pushes the current EIP|RIP onto the stack, and unaligns it by 12 or 8 bytes (depending on the
  // architecture). So count number of bytes needed to align it up to the function's CallFrame (the beginning).
  if (v || has_func_calls() || !return_address_size) {
    v += Support::align_up_diff(v + push_pop_save_size() + return_address_size, stack_alignment);
  }

  _push_pop_save_offset = v;                 // Store 'push_pop_save_offset' <- Function's push/pop save/restore starts here.
  _stack_adjustment = v;                     // Store 'stack_adjustment'     <- SA used by 'add SP, SA' and 'sub SP, SA'.
  v += _push_pop_save_size;                  // Count 'push_pop_save_size'   <- Function's push/pop save/restore ends here.
  _final_stack_size = v;                     // Store 'final_stack_size'     <- Final stack used by the function.

  if (!arch_traits.has_link_reg()) {
    v += register_size;                      // Count 'ReturnAddress' <- As CALL pushes onto stack.
  }

  // If the function performs dynamic stack alignment then the stack-adjustment must be aligned.
  if (has_da) {
    _stack_adjustment = Support::align_up(_stack_adjustment, stack_alignment);
  }

  // Calculate where the function arguments start relative to SP.
  _sa_offset_from_sp = has_da ? FuncFrame::kTagInvalidOffset : v;

  // Calculate where the function arguments start relative to FP or user-provided register.
  _sa_offset_from_sa = has_fp ? return_address_size + register_size        // Return address + frame pointer.
                              : return_address_size + _push_pop_save_size; // Return address + all push/pop regs.

  return Error::kOk;
}
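A minimal sketch of the init/finalize flow implemented above; the signature and the dirty-register choice are illustrative:

  // Compute a function frame for `int f(int, int)` on the host environment.
  FuncDetail func;
  func.init(FuncSignature::build<int, int, int>(), Environment::host());

  FuncFrame frame;
  frame.init(func);                                                   // copies calling-convention properties.
  frame.add_dirty_regs(RegGroup::kGp, Support::bit_mask<RegMask>(0)); // e.g. clobber one GP register.
  frame.finalize();                                                   // computes offsets and final stack size.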
// FuncArgsAssignment - UpdateFuncFrame
// ====================================

ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::update_func_frame(FuncFrame& frame) const noexcept {
  Arch arch = frame.arch();
  const FuncDetail* func = func_detail();

  if (!func) {
    return make_error(Error::kInvalidState);
  }

  RAConstraints constraints;
  ASMJIT_PROPAGATE(constraints.init(arch));

  FuncArgsContext ctx;
  ASMJIT_PROPAGATE(ctx.init_work_data(frame, *this, &constraints));
  ASMJIT_PROPAGATE(ctx.mark_dst_regs_dirty(frame));
  ASMJIT_PROPAGATE(ctx.mark_scratch_regs(frame));
  ASMJIT_PROPAGATE(ctx.mark_stack_args_reg(frame));

  return Error::kOk;
}
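Continuing the sketch above, a usage example; assign_all() is assumed here to be the snake_case spelling of the former assignAll() helper:

  // Route incoming arguments to chosen registers, then update the frame.
  FuncArgsAssignment args(&func);
  args.assign_all(x86::rdi, x86::rsi);  // where each incoming argument should end up.
  args.update_func_frame(frame);        // marks dst/scratch registers dirty, picks the SA register.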
// Func API - Tests
@@ -305,9 +305,9 @@ ASMJIT_FAVOR_SIZE Error FuncArgsAssignment::updateFuncFrame(FuncFrame& frame) co
#if defined(ASMJIT_TEST)
UNIT(func_signature) {
  FuncSignature signature;
  signature.set_ret_t<int8_t>();
  signature.add_arg_t<int16_t>();
  signature.add_arg(TypeId::kInt32);

  EXPECT_EQ(signature, FuncSignature::build<int8_t, int16_t, int32_t>());
}
File diff suppressed because it is too large

View File
@@ -13,296 +13,298 @@ ASMJIT_BEGIN_NAMESPACE
//! \{
FuncArgsContext::FuncArgsContext() noexcept {
  for (WorkData& wd : _work_data) {
    wd.reset();
  }
}

ASMJIT_FAVOR_SIZE Error FuncArgsContext::init_work_data(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept {
  Arch arch = frame.arch();
  const FuncDetail& func = *args.func_detail();

  _arch_traits = &ArchTraits::by_arch(arch);
  _constraints = constraints;
  _arch = arch;

  // Initialize `_arch_regs`.
  for (RegGroup group : Support::enumerate(RegGroup::kMaxVirt)) {
    _work_data[group]._arch_regs = _constraints->available_regs(group);
  }

  if (frame.has_preserved_fp()) {
    _work_data[size_t(RegGroup::kGp)]._arch_regs &= ~Support::bit_mask<RegMask>(arch_traits().fp_reg_id());
  }

  uint32_t reassignment_flag_mask = 0;

  // Extract information from all function arguments/assignments and build Var[] array.
  uint32_t var_id = 0;
  uint32_t arg_count = args.func_detail()->arg_count();

  for (uint32_t arg_index = 0; arg_index < arg_count; arg_index++) {
    for (uint32_t value_index = 0; value_index < Globals::kMaxValuePack; value_index++) {
      const FuncValue& dst_ = args.arg(arg_index, value_index);
      if (!dst_.is_assigned()) {
        continue;
      }

      const FuncValue& src_ = func.arg(arg_index, value_index);
      if (ASMJIT_UNLIKELY(!src_.is_assigned())) {
        return make_error(Error::kInvalidState);
      }

      Var& var = _vars[var_id];
      var.init(src_, dst_);

      FuncValue& src = var.cur;
      FuncValue& dst = var.out;

      RegGroup dst_group = RegGroup::kMaxValue;
      uint32_t dst_id = Reg::kIdBad;
      WorkData* dst_wd = nullptr;

      // Not supported.
      if (src.is_indirect()) {
        return make_error(Error::kInvalidAssignment);
      }

      if (dst.is_reg()) {
        RegType dst_type = dst.reg_type();
        if (ASMJIT_UNLIKELY(!arch_traits().has_reg_type(dst_type))) {
          return make_error(Error::kInvalidRegType);
        }

        // Copy TypeId from source if the destination doesn't have it. The RA used by BaseCompiler would never
        // leave TypeId undefined, but users of FuncAPI can just assign phys regs without specifying their types.
        if (!dst.has_type_id()) {
          dst.set_type_id(RegUtils::type_id_of(dst.reg_type()));
        }

        dst_group = RegUtils::group_of(dst_type);
        if (ASMJIT_UNLIKELY(dst_group > RegGroup::kMaxVirt)) {
          return make_error(Error::kInvalidRegGroup);
        }

        dst_wd = &_work_data[dst_group];
        dst_id = dst.reg_id();

        if (ASMJIT_UNLIKELY(dst_id >= 32 || !Support::bit_test(dst_wd->arch_regs(), dst_id))) {
          return make_error(Error::kInvalidPhysId);
        }

        if (ASMJIT_UNLIKELY(Support::bit_test(dst_wd->dst_regs(), dst_id))) {
          return make_error(Error::kOverlappedRegs);
        }

        dst_wd->_dst_regs |= Support::bit_mask<RegMask>(dst_id);
        dst_wd->_dst_shuf |= Support::bit_mask<RegMask>(dst_id);
        dst_wd->_used_regs |= Support::bit_mask<RegMask>(dst_id);
      }
      else {
        if (!dst.has_type_id()) {
          dst.set_type_id(src.type_id());
        }

        OperandSignature signature = get_suitable_reg_for_mem_to_mem_move(arch, dst.type_id(), src.type_id());
        if (ASMJIT_UNLIKELY(!signature.is_valid())) {
          return make_error(Error::kInvalidState);
        }
        _stack_dst_mask = uint8_t(_stack_dst_mask | Support::bit_mask<uint32_t>(signature.reg_group()));
      }

      if (src.is_reg()) {
        uint32_t src_id = src.reg_id();
        RegGroup src_group = RegUtils::group_of(src.reg_type());

        if (dst_group == src_group) {
          ASMJIT_ASSERT(dst_wd != nullptr);
          dst_wd->assign(var_id, src_id);

          reassignment_flag_mask |= uint32_t(dst_id != src_id) << uint32_t(dst_group);

          if (dst_id == src_id) {
            // The best case, register is allocated where it is expected to be. However, we should
            // not mark this as done if both registers are GP and sign or zero extension is required.
            if (dst_group != RegGroup::kGp) {
              var.mark_done();
            }
            else {
              TypeId dt = dst.type_id();
              TypeId st = src.type_id();

              uint32_t dst_size = TypeUtils::size_of(dt);
              uint32_t src_size = TypeUtils::size_of(st);

              if (dt == TypeId::kVoid || st == TypeId::kVoid || dst_size <= src_size) {
                var.mark_done();
              }
            }
          }
        }
        else {
          if (ASMJIT_UNLIKELY(src_group > RegGroup::kMaxVirt)) {
            return make_error(Error::kInvalidState);
          }

          WorkData& src_data = _work_data[size_t(src_group)];
          src_data.assign(var_id, src_id);

          reassignment_flag_mask |= 1u << uint32_t(dst_group);
        }
      }
      else {
        if (dst_wd)
          dst_wd->_num_stack_args++;
        _has_stack_src = true;
      }

      var_id++;
    }
  }

  // Initialize WorkData::work_regs.
  for (RegGroup group : Support::enumerate(RegGroup::kMaxVirt)) {
    _work_data[group]._work_regs =
      (_work_data[group].arch_regs() & (frame.dirty_regs(group) | ~frame.preserved_regs(group))) | _work_data[group].dst_regs() | _work_data[group].assigned_regs();
    _work_data[group]._needs_scratch = (reassignment_flag_mask >> uint32_t(group)) & 1u;
  }

  // Create a variable that represents `SARegId` if necessary.
  bool sa_reg_required = _has_stack_src && frame.has_dynamic_alignment() && !frame.has_preserved_fp();

  WorkData& gp_regs = _work_data[RegGroup::kGp];
  uint32_t sa_cur_reg_id = frame.sa_reg_id();
  uint32_t sa_out_reg_id = args.sa_reg_id();

  if (sa_cur_reg_id != Reg::kIdBad) {
    // Check if the provided `SARegId` doesn't collide with input registers.
    if (ASMJIT_UNLIKELY(gp_regs.is_assigned(sa_cur_reg_id))) {
      return make_error(Error::kOverlappedRegs);
    }
  }

  if (sa_out_reg_id != Reg::kIdBad) {
    // Check if the provided `SARegId` doesn't collide with argument assignments.
    if (ASMJIT_UNLIKELY(Support::bit_test(gp_regs.dst_regs(), sa_out_reg_id))) {
      return make_error(Error::kOverlappedRegs);
    }
    sa_reg_required = true;
  }

  if (sa_reg_required) {
    TypeId ptr_type_id = Environment::is_32bit(arch) ? TypeId::kUInt32 : TypeId::kUInt64;
    RegType ptr_reg_type = Environment::is_32bit(arch) ? RegType::kGp32 : RegType::kGp64;

    _sa_var_id = uint8_t(var_id);
    _has_preserved_fp = frame.has_preserved_fp();

    Var& var = _vars[var_id];
    var.reset();

    if (sa_cur_reg_id == Reg::kIdBad) {
      if (sa_out_reg_id != Reg::kIdBad && !gp_regs.is_assigned(sa_out_reg_id)) {
        sa_cur_reg_id = sa_out_reg_id;
      }
      else {
        RegMask available_regs = gp_regs.available_regs();
        if (!available_regs) {
          available_regs = gp_regs.arch_regs() & ~gp_regs.work_regs();
        }

        if (ASMJIT_UNLIKELY(!available_regs)) {
          return make_error(Error::kNoMorePhysRegs);
        }

        sa_cur_reg_id = Support::ctz(available_regs);
      }
    }

    var.cur.init_reg(ptr_reg_type, sa_cur_reg_id, ptr_type_id);
    gp_regs.assign(var_id, sa_cur_reg_id);
    gp_regs._work_regs |= Support::bit_mask<RegMask>(sa_cur_reg_id);

    if (sa_out_reg_id != Reg::kIdBad) {
      var.out.init_reg(ptr_reg_type, sa_out_reg_id, ptr_type_id);
      gp_regs._dst_regs |= Support::bit_mask<RegMask>(sa_out_reg_id);
      gp_regs._work_regs |= Support::bit_mask<RegMask>(sa_out_reg_id);
    }
    else {
      var.mark_done();
    }

    var_id++;
  }

  _var_count = var_id;

  // Detect register swaps.
  for (var_id = 0; var_id < _var_count; var_id++) {
    Var& var = _vars[var_id];
    if (var.cur.is_reg() && var.out.is_reg()) {
      uint32_t src_id = var.cur.reg_id();
      uint32_t dst_id = var.out.reg_id();

      RegGroup group = RegUtils::group_of(var.cur.reg_type());
      if (group != RegUtils::group_of(var.out.reg_type())) {
        continue;
      }

      WorkData& wd = _work_data[group];
      if (wd.is_assigned(dst_id)) {
        Var& other = _vars[wd._phys_to_var_id[dst_id]];
        if (RegUtils::group_of(other.out.reg_type()) == group && other.out.reg_id() == src_id) {
          wd._num_swaps++;
          _reg_swaps_mask = uint8_t(_reg_swaps_mask | Support::bit_mask<uint32_t>(group));
        }
      }
    }
  }

  return Error::kOk;
}
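To make the swap detection above concrete, a hypothetical x86-64 case: argument A arrives in rdi but is assigned to rsi, while argument B arrives in rsi and is assigned to rdi. Neither move can go first without clobbering the other, so `_num_swaps` is bumped for the GP group and the group's bit is set in `_reg_swaps_mask`, which later lets the shuffler emit a swap (or use a scratch register) instead of two plain moves.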
ASMJIT_FAVOR_SIZE Error FuncArgsContext::mark_dst_regs_dirty(FuncFrame& frame) noexcept {
  for (RegGroup group : Support::enumerate(RegGroup::kMaxVirt)) {
    WorkData& wd = _work_data[group];
    uint32_t regs = wd.used_regs() | wd._dst_shuf;

    wd._work_regs |= regs;
    frame.add_dirty_regs(group, regs);
  }

  return Error::kOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::mark_scratch_regs(FuncFrame& frame) noexcept {
  uint32_t group_mask = 0;

  // Handle stack to stack moves.
  group_mask |= _stack_dst_mask;

  // Handle register swaps.
  group_mask |= _reg_swaps_mask & ~Support::bit_mask<uint32_t>(RegGroup::kGp);

  if (!group_mask)
    return Error::kOk;

  // Selects one dirty register per affected group that can be used as a scratch register.
  for (RegGroup group : Support::enumerate(RegGroup::kMaxVirt)) {
    if (Support::bit_test(group_mask, group)) {
      WorkData& wd = _work_data[group];

      if (wd._needs_scratch) {
        // Initially, pick some clobbered or dirty register.
        RegMask work_regs = wd.work_regs();
        RegMask regs = work_regs & ~(wd.used_regs() | wd._dst_shuf);

        // If that didn't work out pick some register which is not in 'used'.
        if (!regs) {
          regs = work_regs & ~wd.used_regs();
        }

        // If that didn't work out pick any other register that is allocable. This last resort case will,
        // however, result in marking one more register dirty.
        if (!regs) {
          regs = wd.arch_regs() & ~work_regs;
        }

        // If that didn't work out we will have to use XORs instead of MOVs.
        if (!regs) {
          continue;
        }

        RegMask reg_mask = Support::blsi(regs);
        wd._work_regs |= reg_mask;
        frame.add_dirty_regs(group, reg_mask);
      }
    }
  }

  return Error::kOk;
}
ASMJIT_FAVOR_SIZE Error FuncArgsContext::mark_stack_args_reg(FuncFrame& frame) noexcept {
  if (_sa_var_id != kVarIdNone) {
    const Var& var = _vars[_sa_var_id];
    frame.set_sa_reg_id(var.cur.reg_id());
  }
  else if (frame.has_preserved_fp()) {
    frame.set_sa_reg_id(arch_traits().fp_reg_id());
  }

  return Error::kOk;
}
//! \}
View File
@@ -10,8 +10,8 @@
#include "../core/environment.h"
#include "../core/func.h"
#include "../core/operand.h"
#include "../core/radefs_p.h"
#include "../core/support.h"
#include "../core/raconstraints_p.h"
ASMJIT_BEGIN_NAMESPACE
@@ -19,29 +19,29 @@ ASMJIT_BEGIN_NAMESPACE
//! \addtogroup asmjit_core
//! \{
static inline OperandSignature get_suitable_reg_for_mem_to_mem_move(Arch arch, TypeId dst_type_id, TypeId src_type_id) noexcept {
  const ArchTraits& arch_traits = ArchTraits::by_arch(arch);

  uint32_t signature = 0u;
  uint32_t dst_size = TypeUtils::size_of(dst_type_id);
  uint32_t src_size = TypeUtils::size_of(src_type_id);
  uint32_t max_size = Support::max<uint32_t>(dst_size, src_size);
  uint32_t reg_size = Environment::reg_size_of_arch(arch);

  if (max_size <= reg_size || (TypeUtils::is_int(dst_type_id) && TypeUtils::is_int(src_type_id))) {
    signature = max_size <= 4 ? RegTraits<RegType::kGp32>::kSignature
                              : RegTraits<RegType::kGp64>::kSignature;
  }
  else if (max_size <= 8 && arch_traits.has_reg_type(RegType::kVec64)) {
    signature = RegTraits<RegType::kVec64>::kSignature;
  }
  else if (max_size <= 16 && arch_traits.has_reg_type(RegType::kVec128)) {
    signature = RegTraits<RegType::kVec128>::kSignature;
  }
  else if (max_size <= 32 && arch_traits.has_reg_type(RegType::kVec256)) {
    signature = RegTraits<RegType::kVec256>::kSignature;
  }
  else if (max_size <= 64 && arch_traits.has_reg_type(RegType::kVec512)) {
    signature = RegTraits<RegType::kVec512>::kSignature;
  }
@@ -68,154 +68,154 @@ public:
out.reset();
}
ASMJIT_INLINE_NODEBUG bool isDone() const noexcept { return cur.isDone(); }
ASMJIT_INLINE_NODEBUG void markDone() noexcept { cur.addFlags(FuncValue::kFlagIsDone); }
ASMJIT_INLINE_NODEBUG bool is_done() const noexcept { return cur.is_done(); }
ASMJIT_INLINE_NODEBUG void mark_done() noexcept { cur.add_flags(FuncValue::kFlagIsDone); }
};
struct WorkData {
//! All allocable registers provided by the architecture.
RegMask _archRegs;
RegMask _arch_regs;
//! All registers that can be used by the shuffler.
RegMask _workRegs;
RegMask _work_regs;
//! Registers used by the shuffler (all).
RegMask _usedRegs;
RegMask _used_regs;
//! Assigned registers.
RegMask _assignedRegs;
RegMask _assigned_regs;
//! Destination registers assigned to arguments or SA.
RegMask _dstRegs;
RegMask _dst_regs;
//! Destination registers that require shuffling.
RegMask _dstShuf;
RegMask _dst_shuf;
//! Number of register swaps.
uint8_t _numSwaps;
uint8_t _num_swaps;
//! Number of stack loads.
uint8_t _numStackArgs;
uint8_t _num_stack_args;
//! Whether this work data would need reassignment.
uint8_t _needsScratch;
uint8_t _needs_scratch;
//! Reserved (only used as padding).
uint8_t _reserved[5];
//! Physical ID to variable ID mapping.
uint8_t _physToVarId[32];
uint8_t _phys_to_var_id[32];
inline void reset() noexcept {
_archRegs = 0;
_workRegs = 0;
_usedRegs = 0;
_assignedRegs = 0;
_dstRegs = 0;
_dstShuf = 0;
_numSwaps = 0;
_numStackArgs = 0;
_needsScratch = 0;
_arch_regs = 0;
_work_regs = 0;
_used_regs = 0;
_assigned_regs = 0;
_dst_regs = 0;
_dst_shuf = 0;
_num_swaps = 0;
_num_stack_args = 0;
_needs_scratch = 0;
memset(_reserved, 0, sizeof(_reserved));
memset(_physToVarId, kVarIdNone, 32);
memset(_phys_to_var_id, kVarIdNone, 32);
}
[[nodiscard]]
inline bool isAssigned(uint32_t regId) const noexcept {
ASMJIT_ASSERT(regId < 32);
return Support::bitTest(_assignedRegs, regId);
inline bool is_assigned(uint32_t reg_id) const noexcept {
ASMJIT_ASSERT(reg_id < 32);
return Support::bit_test(_assigned_regs, reg_id);
}
inline void assign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(!isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == kVarIdNone);
inline void assign(uint32_t var_id, uint32_t reg_id) noexcept {
ASMJIT_ASSERT(!is_assigned(reg_id));
ASMJIT_ASSERT(_phys_to_var_id[reg_id] == kVarIdNone);
_physToVarId[regId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(regId);
_phys_to_var_id[reg_id] = uint8_t(var_id);
_assigned_regs ^= Support::bit_mask<RegMask>(reg_id);
}
inline void reassign(uint32_t varId, uint32_t newId, uint32_t oldId) noexcept {
ASMJIT_ASSERT( isAssigned(oldId));
ASMJIT_ASSERT(!isAssigned(newId));
ASMJIT_ASSERT(_physToVarId[oldId] == varId);
ASMJIT_ASSERT(_physToVarId[newId] == kVarIdNone);
inline void reassign(uint32_t var_id, uint32_t new_id, uint32_t old_id) noexcept {
ASMJIT_ASSERT( is_assigned(old_id));
ASMJIT_ASSERT(!is_assigned(new_id));
ASMJIT_ASSERT(_phys_to_var_id[old_id] == var_id);
ASMJIT_ASSERT(_phys_to_var_id[new_id] == kVarIdNone);
_physToVarId[oldId] = uint8_t(kVarIdNone);
_physToVarId[newId] = uint8_t(varId);
_assignedRegs ^= Support::bitMask(newId) ^ Support::bitMask(oldId);
_phys_to_var_id[old_id] = uint8_t(kVarIdNone);
_phys_to_var_id[new_id] = uint8_t(var_id);
_assigned_regs ^= Support::bit_mask<RegMask>(new_id) ^ Support::bit_mask<RegMask>(old_id);
}
inline void swap(uint32_t aVarId, uint32_t aRegId, uint32_t bVarId, uint32_t bRegId) noexcept {
ASMJIT_ASSERT(isAssigned(aRegId));
ASMJIT_ASSERT(isAssigned(bRegId));
ASMJIT_ASSERT(_physToVarId[aRegId] == aVarId);
ASMJIT_ASSERT(_physToVarId[bRegId] == bVarId);
inline void swap(uint32_t a_var_id, uint32_t a_reg_id, uint32_t b_var_id, uint32_t b_reg_id) noexcept {
ASMJIT_ASSERT(is_assigned(a_reg_id));
ASMJIT_ASSERT(is_assigned(b_reg_id));
ASMJIT_ASSERT(_phys_to_var_id[a_reg_id] == a_var_id);
ASMJIT_ASSERT(_phys_to_var_id[b_reg_id] == b_var_id);
_physToVarId[aRegId] = uint8_t(bVarId);
_physToVarId[bRegId] = uint8_t(aVarId);
_phys_to_var_id[a_reg_id] = uint8_t(b_var_id);
_phys_to_var_id[b_reg_id] = uint8_t(a_var_id);
}
inline void unassign(uint32_t varId, uint32_t regId) noexcept {
ASMJIT_ASSERT(isAssigned(regId));
ASMJIT_ASSERT(_physToVarId[regId] == varId);
inline void unassign(uint32_t var_id, uint32_t reg_id) noexcept {
ASMJIT_ASSERT(is_assigned(reg_id));
ASMJIT_ASSERT(_phys_to_var_id[reg_id] == var_id);
DebugUtils::unused(varId);
_physToVarId[regId] = uint8_t(kVarIdNone);
_assignedRegs ^= Support::bitMask(regId);
Support::maybe_unused(var_id);
_phys_to_var_id[reg_id] = uint8_t(kVarIdNone);
_assigned_regs ^= Support::bit_mask<RegMask>(reg_id);
}
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask archRegs() const noexcept { return _archRegs; }
ASMJIT_INLINE_NODEBUG RegMask arch_regs() const noexcept { return _arch_regs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask workRegs() const noexcept { return _workRegs; }
ASMJIT_INLINE_NODEBUG RegMask work_regs() const noexcept { return _work_regs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask usedRegs() const noexcept { return _usedRegs; }
ASMJIT_INLINE_NODEBUG RegMask used_regs() const noexcept { return _used_regs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask assignedRegs() const noexcept { return _assignedRegs; }
ASMJIT_INLINE_NODEBUG RegMask assigned_regs() const noexcept { return _assigned_regs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask dstRegs() const noexcept { return _dstRegs; }
ASMJIT_INLINE_NODEBUG RegMask dst_regs() const noexcept { return _dst_regs; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegMask availableRegs() const noexcept { return _workRegs & ~_assignedRegs; }
ASMJIT_INLINE_NODEBUG RegMask available_regs() const noexcept { return _work_regs & ~_assigned_regs; }
};
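// Editor's note: a standalone illustration (names are mine, not the patch's) of
// the XOR bookkeeping used by assign()/reassign()/unassign() above. Toggling the
// same bit twice restores the mask, so reassign() can move an assignment with a
// single expression: mask ^= bit_mask(new_id) ^ bit_mask(old_id).
inline void xor_bookkeeping_example() noexcept {
  uint32_t assigned = 0;
  assigned ^= (1u << 3);             // assign reg #3   -> bit 3 set.
  assigned ^= (1u << 5) ^ (1u << 3); // reassign 3 -> 5 -> bit 3 cleared, bit 5 set.
  assigned ^= (1u << 5);             // unassign reg #5 -> mask restored to zero.
  ASMJIT_ASSERT(assigned == 0);
}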
//! Architecture traits.
const ArchTraits* _archTraits = nullptr;
const ArchTraits* _arch_traits = nullptr;
//! Architecture constraints.
const RAConstraints* _constraints = nullptr;
//! Target architecture.
Arch _arch = Arch::kUnknown;
//! Has arguments passed via stack (SRC).
bool _hasStackSrc = false;
bool _has_stack_src = false;
//! Has preserved frame-pointer (FP).
bool _hasPreservedFP = false;
bool _has_preserved_fp = false;
//! Has arguments assigned to stack (DST).
uint8_t _stackDstMask = 0;
uint8_t _stack_dst_mask = 0;
//! Register swap groups (bit-mask).
uint8_t _regSwapsMask = 0;
uint8_t _saVarId = kVarIdNone;
uint32_t _varCount = 0;
Support::Array<WorkData, Globals::kNumVirtGroups> _workData;
uint8_t _reg_swaps_mask = 0;
uint8_t _sa_var_id = kVarIdNone;
uint32_t _var_count = 0;
Support::Array<WorkData, Globals::kNumVirtGroups> _work_data;
Var _vars[Globals::kMaxFuncArgs * Globals::kMaxValuePack + 1];
FuncArgsContext() noexcept;
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const ArchTraits& archTraits() const noexcept { return *_archTraits; }
ASMJIT_INLINE_NODEBUG const ArchTraits& arch_traits() const noexcept { return *_arch_traits; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Arch arch() const noexcept { return _arch; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t varCount() const noexcept { return _varCount; }
ASMJIT_INLINE_NODEBUG uint32_t var_count() const noexcept { return _var_count; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG size_t indexOf(const Var* var) const noexcept { return (size_t)(var - _vars); }
ASMJIT_INLINE_NODEBUG size_t index_of(const Var* var) const noexcept { return (size_t)(var - _vars); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG Var& var(size_t varId) noexcept { return _vars[varId]; }
ASMJIT_INLINE_NODEBUG Var& var(size_t var_id) noexcept { return _vars[var_id]; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const Var& var(size_t varId) const noexcept { return _vars[varId]; }
ASMJIT_INLINE_NODEBUG const Var& var(size_t var_id) const noexcept { return _vars[var_id]; }
Error initWorkData(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept;
Error markScratchRegs(FuncFrame& frame) noexcept;
Error markDstRegsDirty(FuncFrame& frame) noexcept;
Error markStackArgsReg(FuncFrame& frame) noexcept;
Error init_work_data(const FuncFrame& frame, const FuncArgsAssignment& args, const RAConstraints* constraints) noexcept;
Error mark_scratch_regs(FuncFrame& frame) noexcept;
Error mark_dst_regs_dirty(FuncFrame& frame) noexcept;
Error mark_stack_args_reg(FuncFrame& frame) noexcept;
};
//! \}
@@ -12,10 +12,10 @@ ASMJIT_BEGIN_NAMESPACE
// DebugUtils - Error As String
// ============================
ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
ASMJIT_FAVOR_SIZE const char* DebugUtils::error_as_string(Error err) noexcept {
#ifndef ASMJIT_NO_TEXT
// @EnumStringBegin{"enum": "ErrorCode", "output": "sError", "strip": "kError"}@
static const char sErrorString[] =
// @EnumStringBegin{"enum": "Error", "output": "error_string", "strip": "k"}@
static const char error_string_data[] =
"Ok\0"
"OutOfMemory\0"
"InvalidArgument\0"
@@ -90,7 +90,7 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
"ProtectionFailure\0"
"<Unknown>\0";
static const uint16_t sErrorIndex[] = {
static const uint16_t error_string_index[] = {
0, 3, 15, 31, 44, 56, 71, 90, 108, 123, 132, 148, 165, 178, 192, 210, 230,
247, 264, 283, 298, 314, 333, 352, 370, 392, 410, 429, 444, 460, 474, 488,
508, 533, 551, 573, 595, 612, 629, 645, 661, 677, 694, 709, 724, 744, 764,
@@ -99,18 +99,18 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
};
// @EnumStringEnd@
return sErrorString + sErrorIndex[Support::min<Error>(err, kErrorCount)];
return error_string_data + error_string_index[Support::min(uint32_t(err), uint32_t(Error::kMaxValue) + 1u)];
#else
DebugUtils::unused(err);
static const char noMessage[] = "";
return noMessage;
Support::maybe_unused(err);
static const char no_message[] = "";
return no_message;
#endif
}
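// Editor's note: a minimal sketch (mine, not AsmJit code) of the packed string
// table technique used by error_as_string() above: all messages are joined into
// one "\0"-separated character array and a parallel uint16_t array stores each
// message's offset, so a lookup is one bounded index instead of a pointer table.
static const char example_messages[] = "Ok\0OutOfMemory\0<Unknown>\0";
static const uint16_t example_offsets[] = { 0, 3, 15 };
static inline const char* example_message_of(uint32_t id) noexcept {
  return example_messages + example_offsets[id < 2u ? id : 2u]; // Clamp to "<Unknown>".
}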
// DebugUtils - Debug Output
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
ASMJIT_FAVOR_SIZE void DebugUtils::debug_output(const char* str) noexcept {
#if defined(_WIN32)
::OutputDebugStringA(str);
#else
@@ -121,14 +121,14 @@ ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
// DebugUtils - Fatal Errors
// =========================
ASMJIT_FAVOR_SIZE void DebugUtils::assertionFailed(const char* file, int line, const char* msg) noexcept {
ASMJIT_FAVOR_SIZE void DebugUtils::assertion_failure(const char* file, int line, const char* msg) noexcept {
char str[1024];
snprintf(str, 1024,
"[asmjit] Assertion failed at %s (line %d):\n"
"[asmjit] %s\n", file, line, msg);
debugOutput(str);
debug_output(str);
::abort();
}
@@ -13,6 +13,7 @@ ASMJIT_BEGIN_NAMESPACE
//! \cond INTERNAL
//! \addtogroup asmjit_utilities
//! \{
namespace Support {
//! Cast designed to cast between function and void* pointers.
@@ -26,13 +27,13 @@ struct PlacementNew { void* ptr; };
#if defined(ASMJIT_NO_STDCXX)
namespace Support {
ASMJIT_INLINE void* operatorNew(size_t n) noexcept { return malloc(n); }
ASMJIT_INLINE void operatorDelete(void* p) noexcept { if (p) free(p); }
ASMJIT_INLINE void* operator_new(size_t n) noexcept { return malloc(n); }
ASMJIT_INLINE void operator_delete(void* p) noexcept { if (p) free(p); }
} // {Support}
#define ASMJIT_BASE_CLASS(TYPE) \
ASMJIT_INLINE void* operator new(size_t n) noexcept { return Support::operatorNew(n); } \
ASMJIT_INLINE void operator delete(void* ptr) noexcept { Support::operatorDelete(ptr); } \
ASMJIT_INLINE void* operator new(size_t n) noexcept { return Support::operator_new(n); } \
ASMJIT_INLINE void operator delete(void* ptr) noexcept { Support::operator_delete(ptr); } \
\
ASMJIT_INLINE void* operator new(size_t, void* ptr) noexcept { return ptr; } \
ASMJIT_INLINE void operator delete(void*, void*) noexcept {} \
@@ -49,18 +50,6 @@ namespace Support {
//! \addtogroup asmjit_core
//! \{
//! Byte order.
enum class ByteOrder {
//! Little endian.
kLE = 0,
//! Big endian.
kBE = 1,
//! Native byte order of the target architecture.
kNative = ASMJIT_ARCH_LE ? kLE : kBE,
//! Swapped byte order of the target architecture.
kSwapped = ASMJIT_ARCH_LE ? kBE : kLE
};
//! A policy that can be used with some `reset()` member functions.
enum class ResetPolicy : uint32_t {
//! Soft reset, doesn't deallocate memory (default).
@@ -81,16 +70,13 @@ static constexpr uint32_t kAllocAlignment = 8u;
//! Aggressive growing strategy threshold.
static constexpr uint32_t kGrowThreshold = 1024u * 1024u * 16u;
//! Default alignment of allocation requests to use when using Zone.
static constexpr uint32_t kZoneAlignment = 8u;
//! Maximum depth of RB-Tree is:
//!
//! `2 * log2(n + 1)`
//!
//! Size of RB node is at least two pointers (without data), so a theoretical architecture limit would be:
//!
//! `2 * log2(addressableMemorySize / sizeof(Node) + 1)`
//! `2 * log2(addressable_memory_size / sizeof(Node) + 1)`
//!
//! Which yields 30 on 32-bit arch and 61 on 64-bit arch. The final value was adjusted by +1 for safety reasons.
static constexpr uint32_t kMaxTreeHeight = (ASMJIT_ARCH_BITS == 32 ? 30 : 61) + 1;
@@ -116,15 +102,12 @@ static constexpr uint32_t kMaxLabelNameSize = 2048;
//! Maximum section name size.
static constexpr uint32_t kMaxSectionNameSize = 35;
//! Maximum size of comment.
//! Maximum size of a comment.
static constexpr uint32_t kMaxCommentSize = 1024;
//! Invalid identifier.
static constexpr uint32_t kInvalidId = 0xFFFFFFFFu;
//! Returned by `indexOf()` and similar when working with containers that use 32-bit index/size.
static constexpr uint32_t kNotFound = 0xFFFFFFFFu;
//! Invalid base address.
static constexpr uint64_t kNoBaseAddress = ~uint64_t(0);
@@ -139,251 +122,235 @@ static const constexpr Init_ Init {};
//! A decorator used to not initialize.
static const constexpr NoInit_ NoInit {};
//! Invalid index, which means not in a string. Used by API that can match items in spans, vectors, etc...
static constexpr size_t kNPos = ~size_t(0);
template<typename T>
static ASMJIT_INLINE_NODEBUG bool is_npos(const T& index) noexcept { return index == T(~T(0)); }
} // {Globals}
//! Casts a `void*` pointer `func` to a function pointer `Func`.
template<typename Func>
static ASMJIT_INLINE_NODEBUG Func ptr_as_func(void* func) noexcept { return Support::ptr_cast_impl<Func, void*>(func); }
static ASMJIT_INLINE_NODEBUG Func ptr_as_func(void* p) noexcept {
return Support::ptr_cast_impl<Func, void*>(p);
}
//! Casts a `void*` pointer `p`, advanced by `offset` bytes, to a function pointer `Func`.
template<typename Func>
static ASMJIT_INLINE_NODEBUG Func ptr_as_func(void* p, size_t offset) noexcept {
return Support::ptr_cast_impl<Func, void*>(static_cast<void*>(static_cast<char*>(p) + offset));
}
//! Casts a function pointer `func` to a void pointer `void*`.
template<typename Func>
static ASMJIT_INLINE_NODEBUG void* func_as_ptr(Func func) noexcept { return Support::ptr_cast_impl<void*, Func>(func); }
static ASMJIT_INLINE_NODEBUG void* func_as_ptr(Func func) noexcept {
return Support::ptr_cast_impl<void*, Func>(func);
}
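// Editor's note: a hypothetical usage sketch. These helpers exist so JIT-compiled
// code, handed around as void*, can be called through a typed function pointer;
// the new `offset` overload addresses a function placed at a byte offset inside
// an executable buffer. `AddFunc` and `buffer` below are illustrative assumptions.
using AddFunc = int (*)(int, int);
static inline int call_jitted_add(void* buffer) noexcept {
  AddFunc add = ptr_as_func<AddFunc>(buffer);
  void* round_trip = func_as_ptr<AddFunc>(add); // Yields `buffer` back.
  (void)round_trip;
  return add(1, 2);
}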
//! \}
//! \addtogroup asmjit_error_handling
//! \{
//! AsmJit error type (uint32_t).
using Error = uint32_t;
//! AsmJit error codes.
enum ErrorCode : uint32_t {
// @EnumValuesBegin{"enum": "ErrorCode"}@
//! AsmJit error code.
enum class Error : uint32_t {
// @EnumValuesBegin{"enum": "Error"}@
//! No error (success).
kErrorOk = 0,
kOk = 0,
//! Out of memory.
kErrorOutOfMemory,
kOutOfMemory,
//! Invalid argument.
kErrorInvalidArgument,
kInvalidArgument,
//! Invalid state.
//!
//! If this error is returned it means that either you are doing something wrong or AsmJit caught itself by
//! doing something wrong. This error should never be ignored.
kErrorInvalidState,
kInvalidState,
//! Invalid or incompatible architecture.
kErrorInvalidArch,
kInvalidArch,
//! The object is not initialized.
kErrorNotInitialized,
kNotInitialized,
//! The object is already initialized.
kErrorAlreadyInitialized,
kAlreadyInitialized,
//! Either a built-in feature was disabled at compile time and it's not available or the feature is not
//! available on the target platform.
//!
//! For example trying to allocate large pages on an unsupported platform would return this error.
kErrorFeatureNotEnabled,
kFeatureNotEnabled,
//! Too many handles (Windows) or file descriptors (Unix/Posix).
kErrorTooManyHandles,
kTooManyHandles,
//! Code generated is larger than allowed.
kErrorTooLarge,
kTooLarge,
//! No code generated.
//!
//! Returned by runtime if the \ref CodeHolder contains no code.
kErrorNoCodeGenerated,
kNoCodeGenerated,
//! Invalid directive.
kErrorInvalidDirective,
kInvalidDirective,
//! Attempt to use uninitialized label.
kErrorInvalidLabel,
kInvalidLabel,
//! Label index overflow - a single \ref BaseAssembler instance can hold almost 2^32 (4 billion) labels. If
//! there is an attempt to create more labels, then this error is returned.
kErrorTooManyLabels,
kTooManyLabels,
//! Label is already bound.
kErrorLabelAlreadyBound,
kLabelAlreadyBound,
//! Label is already defined (named labels).
kErrorLabelAlreadyDefined,
kLabelAlreadyDefined,
//! Label name is too long.
kErrorLabelNameTooLong,
kLabelNameTooLong,
//! Label must always be local if it's anonymous (without a name).
kErrorInvalidLabelName,
//! Parent id passed to \ref CodeHolder::newNamedLabelId() was either invalid or parent is not supported by
kInvalidLabelName,
//! Parent id passed to \ref CodeHolder::new_named_label_id() was either invalid or parent is not supported by
//! the requested `LabelType`.
kErrorInvalidParentLabel,
kInvalidParentLabel,
//! Invalid section.
kErrorInvalidSection,
kInvalidSection,
//! Too many sections (section index overflow).
kErrorTooManySections,
kTooManySections,
//! Invalid section name (most probably too long).
kErrorInvalidSectionName,
kInvalidSectionName,
//! Relocation index overflow (too many relocations).
kErrorTooManyRelocations,
kTooManyRelocations,
//! Invalid relocation entry.
kErrorInvalidRelocEntry,
kInvalidRelocEntry,
//! Reloc entry contains address that is out of range (unencodable).
kErrorRelocOffsetOutOfRange,
kRelocOffsetOutOfRange,
//! Invalid assignment to a register, function argument, or function return value.
kErrorInvalidAssignment,
kInvalidAssignment,
//! Invalid instruction.
kErrorInvalidInstruction,
kInvalidInstruction,
//! Invalid register type.
kErrorInvalidRegType,
kInvalidRegType,
//! Invalid register group.
kErrorInvalidRegGroup,
kInvalidRegGroup,
//! Invalid physical register id.
kErrorInvalidPhysId,
kInvalidPhysId,
//! Invalid virtual register id.
kErrorInvalidVirtId,
kInvalidVirtId,
//! Invalid element index (ARM).
kErrorInvalidElementIndex,
kInvalidElementIndex,
//! Invalid prefix combination (X86|X64).
kErrorInvalidPrefixCombination,
kInvalidPrefixCombination,
//! Invalid LOCK prefix (X86|X64).
kErrorInvalidLockPrefix,
kInvalidLockPrefix,
//! Invalid XACQUIRE prefix (X86|X64).
kErrorInvalidXAcquirePrefix,
kInvalidXAcquirePrefix,
//! Invalid XRELEASE prefix (X86|X64).
kErrorInvalidXReleasePrefix,
kInvalidXReleasePrefix,
//! Invalid REP prefix (X86|X64).
kErrorInvalidRepPrefix,
kInvalidRepPrefix,
//! Invalid REX prefix (X86|X64).
kErrorInvalidRexPrefix,
kInvalidRexPrefix,
//! Invalid {...} register (X86|X64).
kErrorInvalidExtraReg,
kInvalidExtraReg,
//! Invalid {k} use (not supported by the instruction) (X86|X64).
kErrorInvalidKMaskUse,
kInvalidKMaskUse,
//! Invalid {k}{z} use (not supported by the instruction) (X86|X64).
kErrorInvalidKZeroUse,
kInvalidKZeroUse,
//! Invalid broadcast - Currently only related to invalid use of AVX-512 {1tox} (X86|X64).
kErrorInvalidBroadcast,
kInvalidBroadcast,
//! Invalid 'embedded-rounding' {er} or 'suppress-all-exceptions' {sae} (AVX-512) (X86|X64).
kErrorInvalidEROrSAE,
kInvalidEROrSAE,
//! Invalid address used (not encodable).
kErrorInvalidAddress,
kInvalidAddress,
//! Invalid index register used in memory address (not encodable).
kErrorInvalidAddressIndex,
kInvalidAddressIndex,
//! Invalid address scale (not encodable).
kErrorInvalidAddressScale,
kInvalidAddressScale,
//! Invalid use of 64-bit address.
kErrorInvalidAddress64Bit,
kInvalidAddress64Bit,
//! Invalid use of 64-bit address that require 32-bit zero-extension (X64).
kErrorInvalidAddress64BitZeroExtension,
kInvalidAddress64BitZeroExtension,
//! Invalid displacement (not encodable).
kErrorInvalidDisplacement,
kInvalidDisplacement,
//! Invalid segment (X86|X86_64).
kErrorInvalidSegment,
kInvalidSegment,
//! Invalid immediate (out of bounds on X86 and invalid pattern on ARM).
kErrorInvalidImmediate,
kInvalidImmediate,
//! Invalid operand size.
kErrorInvalidOperandSize,
kInvalidOperandSize,
//! Ambiguous operand size (memory has zero size while it's required to determine the operation type).
kErrorAmbiguousOperandSize,
kAmbiguousOperandSize,
//! Mismatching operand size (size of multiple operands doesn't match the operation size).
kErrorOperandSizeMismatch,
kOperandSizeMismatch,
//! Invalid option.
kErrorInvalidOption,
kInvalidOption,
//! Option already defined.
kErrorOptionAlreadyDefined,
kOptionAlreadyDefined,
//! Invalid TypeId.
kErrorInvalidTypeId,
kInvalidTypeId,
//! Invalid use of a 8-bit GPB-HIGH register.
kErrorInvalidUseOfGpbHi,
kInvalidUseOfGpbHi,
//! Invalid use of a 64-bit GPQ register in 32-bit mode.
kErrorInvalidUseOfGpq,
kInvalidUseOfGpq,
//! Invalid use of an 80-bit float (\ref TypeId::kFloat80).
kErrorInvalidUseOfF80,
kInvalidUseOfF80,
//! Instruction requires the use of consecutive registers, but the registers in its operands aren't consecutive (AVX512, ASIMD load/store, etc...).
kErrorNotConsecutiveRegs,
kNotConsecutiveRegs,
//! Failed to allocate consecutive registers - allocable registers are either too restricted or there is a bug in RW info.
kErrorConsecutiveRegsAllocation,
kConsecutiveRegsAllocation,
//! Illegal virtual register - reported by instruction validation.
kErrorIllegalVirtReg,
kIllegalVirtReg,
//! AsmJit cannot create more virtual registers.
kErrorTooManyVirtRegs,
kTooManyVirtRegs,
//! AsmJit requires a physical register, but no one is available.
kErrorNoMorePhysRegs,
kNoMorePhysRegs,
//! A variable has been assigned more than once to a function argument (BaseCompiler).
kErrorOverlappedRegs,
kOverlappedRegs,
//! Invalid register to hold stack arguments offset.
kErrorOverlappingStackRegWithRegArg,
kOverlappingStackRegWithRegArg,
//! Unbound label cannot be evaluated by expression.
kErrorExpressionLabelNotBound,
kExpressionLabelNotBound,
//! Arithmetic overflow during expression evaluation.
kErrorExpressionOverflow,
kExpressionOverflow,
//! Failed to open anonymous memory handle or file descriptor.
kErrorFailedToOpenAnonymousMemory,
kFailedToOpenAnonymousMemory,
//! Failed to open a file.
//!
//! \note This is a generic error that is used by internal filesystem API.
kErrorFailedToOpenFile,
kFailedToOpenFile,
//! Protection failure can be returned from a virtual memory allocator or when trying to change memory access
//! permissions.
kErrorProtectionFailure,
kProtectionFailure,
// @EnumValuesEnd@
//! Count of AsmJit error codes.
kErrorCount
};
//! Debugging utilities.
namespace DebugUtils {
//! Maximum value of a valid AsmJit `Error` code.
kMaxValue = kProtectionFailure,
//! \cond INTERNAL
//! Used to silence warnings about unused arguments or variables.
template<typename... Args>
static ASMJIT_INLINE_NODEBUG void unused(Args&&...) noexcept {}
//! Error code used to inform the caller about an alternative success state.
//!
//! \remarks This value is only used internally in AsmJit.
kByPass = 0xFFFFFFFFu
//! \endcond
};
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `errored` can help with tracing the origin of any
//! error reported / returned by AsmJit.
[[nodiscard]]
static constexpr Error errored(Error err) noexcept { return err; }
//! Returns a printable version of `asmjit::Error` code.
[[nodiscard]]
ASMJIT_API const char* errorAsString(Error err) noexcept;
//! Called to output debugging message(s).
ASMJIT_API void debugOutput(const char* str) noexcept;
//! Called on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertion failures a breakpoint can be put at \ref assertionFailed() function
//! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit
//! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out
//! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc...
[[noreturn]]
ASMJIT_API void assertionFailed(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils}
static inline constexpr Error kErrorOk = Error::kOk;
//! \def ASMJIT_ASSERT(...)
//!
@@ -392,25 +359,18 @@ ASMJIT_API void assertionFailed(const char* file, int line, const char* msg) noe
#define ASMJIT_ASSERT(...) \
do { \
if (ASMJIT_UNLIKELY(!(__VA_ARGS__))) { \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
::asmjit::DebugUtils::assertion_failure(__FILE__, __LINE__, #__VA_ARGS__); \
} \
} while (0)
#else
#define ASMJIT_ASSERT(...) ((void)0)
#endif
#define ASMJIT_RUNTIME_ASSERT(...) \
do { \
if (ASMJIT_UNLIKELY(!(__VA_ARGS__))) { \
::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, #__VA_ARGS__); \
} \
} while (0)
//! \def ASMJIT_NOT_REACHED()
//!
//! Run-time assertion used in code that should never be reached.
#if defined(ASMJIT_BUILD_DEBUG)
#define ASMJIT_NOT_REACHED() ::asmjit::DebugUtils::assertionFailed(__FILE__, __LINE__, "ASMJIT_NOT_REACHED()")
#define ASMJIT_NOT_REACHED() ::asmjit::DebugUtils::assertion_failure(__FILE__, __LINE__, "ASMJIT_NOT_REACHED()")
#elif defined(__GNUC__)
#define ASMJIT_NOT_REACHED() __builtin_unreachable()
#else
@@ -423,14 +383,66 @@ ASMJIT_API void assertionFailed(const char* file, int line, const char* msg) noe
//! internally, but kept public for users that want to use the same technique to propagate errors to the caller.
#define ASMJIT_PROPAGATE(...) \
do { \
::asmjit::Error _err_ = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(_err_)) { \
return _err_; \
::asmjit::Error error_to_propagate = __VA_ARGS__; \
if (ASMJIT_UNLIKELY(error_to_propagate != ::asmjit::Error::kOk)) { \
return error_to_propagate; \
} \
} while (0)
//! \}
//! Returns the error `err` passed.
//!
//! Provided for debugging purposes. Putting a breakpoint inside `make_error` can help with tracing the origin of any
//! error reported or returned by AsmJit.
[[nodiscard]]
static constexpr Error make_error(Error err) noexcept { return err; }
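// Editor's note: a hypothetical sketch showing how the pieces compose after the
// rename. make_error() is an identity function kept as a central breakpoint
// target, and ASMJIT_PROPAGATE returns early on anything other than Error::kOk.
static inline Error run_two_steps(Error (*step1)(), Error (*step2)()) noexcept {
  if (!step1 || !step2) {
    return make_error(Error::kInvalidArgument); // Breakpoint-friendly error origin.
  }
  ASMJIT_PROPAGATE(step1()); // Returns step1's error immediately if it failed.
  return step2();
}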
//! Debugging utilities.
namespace DebugUtils {
//! Returns a printable version of `asmjit::Error` code.
[[nodiscard]]
ASMJIT_API const char* error_as_string(Error err) noexcept;
//! Called to output debugging messages.
ASMJIT_API void debug_output(const char* str) noexcept;
//! Called on assertion failure.
//!
//! \param file Source file name where it happened.
//! \param line Line in the source file.
//! \param msg Message to display.
//!
//! If you have problems with assertion failures, a breakpoint can be put at the \ref assertion_failure() function
//! (asmjit/core/globals.cpp). A call stack will be available when such assertion failure is triggered. AsmJit
//! always returns errors on failures, assertions are a last resort and usually mean unrecoverable state due to out
//! of range array access or totally invalid arguments like nullptr where a valid pointer should be provided, etc...
[[noreturn]]
ASMJIT_API void assertion_failure(const char* file, int line, const char* msg) noexcept;
} // {DebugUtils}
//! Output parameter.
template<typename T>
class Out {
protected:
T& _val;
public:
ASMJIT_INLINE_NODEBUG explicit Out(T& val) noexcept
: _val(val) {}
ASMJIT_INLINE_NODEBUG Out& operator=(const T& val) noexcept {
_val = val;
return *this;
}
ASMJIT_INLINE_NODEBUG T& value() const noexcept { return _val; }
ASMJIT_INLINE_NODEBUG T& operator*() const noexcept { return _val; }
ASMJIT_INLINE_NODEBUG T* operator->() const noexcept { return &_val; }
};
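// Editor's note: a hypothetical usage sketch of the Out<T> wrapper above. Passing
// Out<T> instead of a plain T& makes output parameters explicit at the call site.
static inline Error compute_size(Out<size_t> out) noexcept {
  out = size_t(42); // operator= stores through the wrapped reference.
  return Error::kOk;
}
// At a call site: size_t size; compute_size(Out<size_t>(size)); -> size == 42.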
ASMJIT_END_NAMESPACE
//! Implementation of a placement new so we don't have to depend on `<new>`.
@@ -21,32 +21,32 @@ ASMJIT_BEGIN_NAMESPACE
// ===========================
#ifndef ASMJIT_NO_TEXT
Error InstAPI::instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept {
Error InstAPI::inst_id_to_string(Arch arch, InstId inst_id, InstStringifyOptions options, String& output) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::instIdToString(instId, options, output);
if (Environment::is_family_x86(arch)) {
return x86::InstInternal::inst_id_to_string(inst_id, options, output);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::instIdToString(instId, options, output);
if (Environment::is_family_aarch64(arch)) {
return a64::InstInternal::inst_id_to_string(inst_id, options, output);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
return make_error(Error::kInvalidArch);
}
InstId InstAPI::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
InstId InstAPI::string_to_inst_id(Arch arch, const char* s, size_t len) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::stringToInstId(s, len);
if (Environment::is_family_x86(arch)) {
return x86::InstInternal::string_to_inst_id(s, len);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::stringToInstId(s, len);
if (Environment::is_family_aarch64(arch)) {
return a64::InstInternal::string_to_inst_id(s, len);
}
#endif
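// Editor's note: a hedged usage sketch of the renamed introspection entry points,
// assuming the x86 backend is compiled in and that `x86::Inst::kIdAdd` names the
// ADD instruction as in current AsmJit.
static inline void stringify_example(String& output) noexcept {
  (void)InstAPI::inst_id_to_string(Arch::kX64, x86::Inst::kIdAdd,
                                   InstStringifyOptions::kNone, output);
  InstId id = InstAPI::string_to_inst_id(Arch::kX64, "add", 3); // kIdNone if unknown.
  (void)id;
}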
@@ -58,25 +58,25 @@ InstId InstAPI::stringToInstId(Arch arch, const char* s, size_t len) noexcept {
// ==================
#ifndef ASMJIT_NO_VALIDATION
Error InstAPI::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags) noexcept {
Error InstAPI::validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t op_count, ValidationFlags validation_flags) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
if (Environment::is_family_x86(arch)) {
if (arch == Arch::kX86) {
return x86::InstInternal::validateX86(inst, operands, opCount, validationFlags);
return x86::InstInternal::validate_x86(inst, operands, op_count, validation_flags);
}
else {
return x86::InstInternal::validateX64(inst, operands, opCount, validationFlags);
return x86::InstInternal::validate_x64(inst, operands, op_count, validation_flags);
}
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::validate(inst, operands, opCount, validationFlags);
if (Environment::is_family_aarch64(arch)) {
return a64::InstInternal::validate(inst, operands, op_count, validation_flags);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
return make_error(Error::kInvalidArch);
}
#endif // !ASMJIT_NO_VALIDATION
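// Editor's note: a hypothetical sketch of validate() after the rename. BaseInst
// (defined in inst.h) bundles the instruction id and options; ValidationFlags::kNone
// requests plain validation without virtual register support.
static inline Error validate_x64_add(const Operand_* operands, size_t op_count) noexcept {
  return InstAPI::validate(Arch::kX64, BaseInst(x86::Inst::kIdAdd),
                           operands, op_count, ValidationFlags::kNone);
}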
@@ -84,24 +84,24 @@ Error InstAPI::validate(Arch arch, const BaseInst& inst, const Operand_* operand
// =====================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept {
if (ASMJIT_UNLIKELY(opCount > Globals::kMaxOpCount)) {
return DebugUtils::errored(kErrorInvalidArgument);
Error InstAPI::query_rw_info(Arch arch, const BaseInst& inst, const Operand_* operands, size_t op_count, InstRWInfo* out) noexcept {
if (ASMJIT_UNLIKELY(op_count > Globals::kMaxOpCount)) {
return make_error(Error::kInvalidArgument);
}
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::queryRWInfo(arch, inst, operands, opCount, out);
if (Environment::is_family_x86(arch)) {
return x86::InstInternal::query_rw_info(arch, inst, operands, op_count, out);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::queryRWInfo(inst, operands, opCount, out);
if (Environment::is_family_aarch64(arch)) {
return a64::InstInternal::query_rw_info(inst, operands, op_count, out);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
return make_error(Error::kInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
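// Editor's note: a hedged sketch of query_rw_info() usage. The returned InstRWInfo
// reports, per operand, which bytes are read and written (see the OpRWInfo byte
// masks later in this patch).
static inline Error inspect_rw(Arch arch, const BaseInst& inst,
                               const Operand_* operands, size_t op_count) noexcept {
  InstRWInfo info;
  ASMJIT_PROPAGATE(InstAPI::query_rw_info(arch, inst, operands, op_count, &info));
  // Per-operand read/write byte masks can now be examined through `info`.
  return Error::kOk;
}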
@@ -109,20 +109,20 @@ Error InstAPI::queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* oper
// =======================
#ifndef ASMJIT_NO_INTROSPECTION
Error InstAPI::queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept {
Error InstAPI::query_features(Arch arch, const BaseInst& inst, const Operand_* operands, size_t op_count, CpuFeatures* out) noexcept {
#if !defined(ASMJIT_NO_X86)
if (Environment::isFamilyX86(arch)) {
return x86::InstInternal::queryFeatures(arch, inst, operands, opCount, out);
if (Environment::is_family_x86(arch)) {
return x86::InstInternal::query_features(arch, inst, operands, op_count, out);
}
#endif
#if !defined(ASMJIT_NO_AARCH64)
if (Environment::isFamilyAArch64(arch)) {
return a64::InstInternal::queryFeatures(inst, operands, opCount, out);
if (Environment::is_family_aarch64(arch)) {
return a64::InstInternal::query_features(inst, operands, op_count, out);
}
#endif
return DebugUtils::errored(kErrorInvalidArch);
return make_error(Error::kInvalidArch);
}
#endif // !ASMJIT_NO_INTROSPECTION
@@ -92,7 +92,7 @@ enum class InstOptions : uint32_t {
//! - `pinsrq xmm0, ?, 0` followed by `pinsrq xmm0, ?, 1`
//!
//! - If the allocated virtual register is used temporarily for scalar operations. For example if you allocate a
//! full vector like `x86::Compiler::newXmm()` and then use that vector for scalar operations you should use
//! full vector like `x86::Compiler::new_xmm()` and then use that vector for scalar operations you should use
//! `overwrite()` directive:
//!
//! - `sqrtss x, y` - only LO element of `x` is changed, if you don't
@@ -155,13 +155,13 @@ enum class InstOptions : uint32_t {
//! AVX-512: Mask of all possible AVX-512 options except EVEX prefix flag.
kX86_AVX512Mask = 0x00FC0000u,
//! Force REX.B and/or VEX.B field (X64 only).
//! Force REX.B and/or VEX.B field (X64 only, used internally).
kX86_OpCodeB = 0x01000000u,
//! Force REX.X and/or VEX.X field (X64 only).
//! Force REX.X and/or VEX.X field (X64 only, used internally).
kX86_OpCodeX = 0x02000000u,
//! Force REX.R and/or VEX.R field (X64 only).
//! Force REX.R and/or VEX.R field (X64 only, used internally).
kX86_OpCodeR = 0x04000000u,
//! Force REX.W and/or VEX.W field (X64 only).
//! Force REX.W and/or VEX.W field (X64 only, used internally).
kX86_OpCodeW = 0x08000000u,
//! Force REX prefix (X64 only).
kX86_Rex = 0x40000000u,
@@ -216,7 +216,7 @@ enum class InstStringifyOptions : uint32_t {
};
ASMJIT_DEFINE_ENUM_FLAGS(InstStringifyOptions)
//! Instruction id, options, and extraReg in a single structure. This structure exists mainly to simplify analysis
//! Instruction id, options, and extra_reg in a single structure. This structure exists mainly to simplify analysis
//! and validation API that requires `BaseInst` and `Operand[]` array.
class BaseInst {
public:
@@ -224,11 +224,11 @@ public:
//! \{
//! Instruction id with modifiers.
InstId _id;
InstId _inst_id;
//! Instruction options.
InstOptions _options;
//! Extra register used by the instruction (either REP register or AVX-512 selector).
RegOnly _extraReg;
RegOnly _extra_reg;
enum Id : uint32_t {
//! Invalid or uninitialized instruction id.
@@ -246,20 +246,20 @@ public:
//!
//! Default values of `id` and `options` are zero, which means a 'none' instruction. Such an instruction is
//! guaranteed to never exist for any architecture supported by AsmJit.
ASMJIT_INLINE_NODEBUG explicit BaseInst(InstId instId = 0, InstOptions options = InstOptions::kNone) noexcept
: _id(instId),
ASMJIT_INLINE_NODEBUG explicit BaseInst(InstId inst_id = 0, InstOptions options = InstOptions::kNone) noexcept
: _inst_id(inst_id),
_options(options),
_extraReg() {}
_extra_reg() {}
ASMJIT_INLINE_NODEBUG BaseInst(InstId instId, InstOptions options, const RegOnly& extraReg) noexcept
: _id(instId),
ASMJIT_INLINE_NODEBUG BaseInst(InstId inst_id, InstOptions options, const RegOnly& extra_reg) noexcept
: _inst_id(inst_id),
_options(options),
_extraReg(extraReg) {}
_extra_reg(extra_reg) {}
ASMJIT_INLINE_NODEBUG BaseInst(InstId instId, InstOptions options, const Reg& extraReg) noexcept
: _id(instId),
ASMJIT_INLINE_NODEBUG BaseInst(InstId inst_id, InstOptions options, const Reg& extra_reg) noexcept
: _inst_id(inst_id),
_options(options),
_extraReg { extraReg.signature(), extraReg.id() } {}
_extra_reg{extra_reg.signature(), extra_reg.id()} {}
//! \}
@@ -268,27 +268,27 @@ public:
//! Returns the instruction id with modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId id() const noexcept { return _id; }
ASMJIT_INLINE_NODEBUG InstId inst_id() const noexcept { return _inst_id; }
//! Sets the instruction id and modiiers from `id`.
ASMJIT_INLINE_NODEBUG void setId(InstId id) noexcept { _id = id; }
//! Sets the instruction id and modifiers from `inst_id`.
ASMJIT_INLINE_NODEBUG void set_inst_id(InstId inst_id) noexcept { _inst_id = inst_id; }
//! Resets the instruction id and modifiers to zero, see \ref kIdNone.
ASMJIT_INLINE_NODEBUG void resetId() noexcept { _id = 0; }
ASMJIT_INLINE_NODEBUG void reset_inst_id() noexcept { _inst_id = 0; }
//! Returns a real instruction id that doesn't contain any modifiers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstId realId() const noexcept { return _id & uint32_t(InstIdParts::kRealId); }
ASMJIT_INLINE_NODEBUG InstId real_id() const noexcept { return _inst_id & uint32_t(InstIdParts::kRealId); }
template<InstIdParts kPart>
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t getInstIdPart() const noexcept {
return (uint32_t(_id) & uint32_t(kPart)) >> Support::ConstCTZ<uint32_t(kPart)>::value;
ASMJIT_INLINE_NODEBUG uint32_t inst_id_part() const noexcept {
return (uint32_t(_inst_id) & uint32_t(kPart)) >> Support::ctz_const<kPart>;
}
template<InstIdParts kPart>
ASMJIT_INLINE_NODEBUG void setInstIdPart(uint32_t value) noexcept {
_id = (_id & ~uint32_t(kPart)) | (value << Support::ConstCTZ<uint32_t(kPart)>::value);
ASMJIT_INLINE_NODEBUG void set_inst_id_part(uint32_t value) noexcept {
_inst_id = (_inst_id & ~uint32_t(kPart)) | (value << Support::ctz_const<kPart>);
}
//! \}
@@ -302,19 +302,19 @@ public:
//! Tests whether the given instruction `option` is enabled.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOption(InstOptions option) const noexcept { return Support::test(_options, option); }
ASMJIT_INLINE_NODEBUG bool has_option(InstOptions option) const noexcept { return Support::test(_options, option); }
//! Replaces all instruction options by the given `options`.
ASMJIT_INLINE_NODEBUG void setOptions(InstOptions options) noexcept { _options = options; }
ASMJIT_INLINE_NODEBUG void set_options(InstOptions options) noexcept { _options = options; }
//! Adds instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void addOptions(InstOptions options) noexcept { _options |= options; }
ASMJIT_INLINE_NODEBUG void add_options(InstOptions options) noexcept { _options |= options; }
//! Clears instruction options provided by `options`.
ASMJIT_INLINE_NODEBUG void clearOptions(InstOptions options) noexcept { _options &= ~options; }
ASMJIT_INLINE_NODEBUG void clear_options(InstOptions options) noexcept { _options &= ~options; }
//! Resets all instruction options to `InstOptions::kNone` (there will be no instruction options active after reset).
ASMJIT_INLINE_NODEBUG void resetOptions() noexcept { _options = InstOptions::kNone; }
ASMJIT_INLINE_NODEBUG void reset_options() noexcept { _options = InstOptions::kNone; }
//! \}
@@ -326,19 +326,19 @@ public:
//! \note Extra registers are currently only used on X86 by AVX-512 masking such as `{k}` and `{k}{z}` and by repeated
//! instructions to explicitly assign a virtual register that would be ECX/RCX.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasExtraReg() const noexcept { return _extraReg.isReg(); }
ASMJIT_INLINE_NODEBUG bool has_extra_reg() const noexcept { return _extra_reg.is_reg(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG RegOnly& extraReg() noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG RegOnly& extra_reg() noexcept { return _extra_reg; }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const RegOnly& extraReg() const noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG const RegOnly& extra_reg() const noexcept { return _extra_reg; }
ASMJIT_INLINE_NODEBUG void setExtraReg(const Reg& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void set_extra_reg(const Reg& reg) noexcept { _extra_reg.init(reg); }
ASMJIT_INLINE_NODEBUG void setExtraReg(const RegOnly& reg) noexcept { _extraReg.init(reg); }
ASMJIT_INLINE_NODEBUG void set_extra_reg(const RegOnly& reg) noexcept { _extra_reg.init(reg); }
ASMJIT_INLINE_NODEBUG void resetExtraReg() noexcept { _extraReg.reset(); }
ASMJIT_INLINE_NODEBUG void reset_extra_reg() noexcept { _extra_reg.reset(); }
//! \}
@@ -346,15 +346,15 @@ public:
//! \{
[[nodiscard]]
ASMJIT_INLINE_NODEBUG arm::CondCode armCondCode() const noexcept { return (arm::CondCode)getInstIdPart<InstIdParts::kARM_Cond>(); }
ASMJIT_INLINE_NODEBUG arm::CondCode arm_cond_code() const noexcept { return (arm::CondCode)inst_id_part<InstIdParts::kARM_Cond>(); }
ASMJIT_INLINE_NODEBUG void setArmCondCode(arm::CondCode cc) noexcept { setInstIdPart<InstIdParts::kARM_Cond>(uint32_t(cc)); }
ASMJIT_INLINE_NODEBUG void set_arm_cond_code(arm::CondCode cc) noexcept { set_inst_id_part<InstIdParts::kARM_Cond>(uint32_t(cc)); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT>(); }
ASMJIT_INLINE_NODEBUG a32::DataType arm_dt() const noexcept { return (a32::DataType)inst_id_part<InstIdParts::kA32_DT>(); }
[[nodiscard]]
ASMJIT_INLINE_NODEBUG a32::DataType armDt2() const noexcept { return (a32::DataType)getInstIdPart<InstIdParts::kA32_DT2>(); }
ASMJIT_INLINE_NODEBUG a32::DataType arm_dt2() const noexcept { return (a32::DataType)inst_id_part<InstIdParts::kA32_DT2>(); }
//! \}
@@ -362,31 +362,31 @@ public:
//! \{
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, arm::CondCode cc) noexcept {
return id | (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
static ASMJIT_INLINE_CONSTEXPR InstId compose_arm_inst_id(uint32_t id, arm::CondCode cc) noexcept {
return id | (uint32_t(cc) << Support::ctz_const<InstIdParts::kARM_Cond>);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
static ASMJIT_INLINE_CONSTEXPR InstId compose_arm_inst_id(uint32_t id, a32::DataType dt, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ctz_const<InstIdParts::kA32_DT>)
| (uint32_t(cc) << Support::ctz_const<InstIdParts::kARM_Cond>);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId composeARMInstId(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT)>::value)
| (uint32_t(dt2) << Support::ConstCTZ<uint32_t(InstIdParts::kA32_DT2)>::value)
| (uint32_t(cc) << Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
static ASMJIT_INLINE_CONSTEXPR InstId compose_arm_inst_id(uint32_t id, a32::DataType dt, a32::DataType dt2, arm::CondCode cc = arm::CondCode::kAL) noexcept {
return id | (uint32_t(dt) << Support::ctz_const<InstIdParts::kA32_DT>)
| (uint32_t(dt2) << Support::ctz_const<InstIdParts::kA32_DT2>)
| (uint32_t(cc) << Support::ctz_const<InstIdParts::kARM_Cond>);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR InstId extractRealId(uint32_t id) noexcept {
static ASMJIT_INLINE_CONSTEXPR InstId extract_real_id(uint32_t id) noexcept {
return id & uint32_t(InstIdParts::kRealId);
}
[[nodiscard]]
static ASMJIT_INLINE_CONSTEXPR arm::CondCode extractARMCondCode(uint32_t id) noexcept {
return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ConstCTZ<uint32_t(InstIdParts::kARM_Cond)>::value);
static ASMJIT_INLINE_CONSTEXPR arm::CondCode extract_arm_cond_code(uint32_t id) noexcept {
return (arm::CondCode)((uint32_t(id) & uint32_t(InstIdParts::kARM_Cond)) >> Support::ctz_const<InstIdParts::kARM_Cond>);
}
//! \}
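// Editor's note: an illustrative sketch (values are mine) of the bit packing done
// by compose_arm_inst_id(): the real id stays in the low bits selected by
// InstIdParts::kRealId and the condition code is shifted into its own field, so
// both round-trip through extract_real_id() and extract_arm_cond_code().
static inline void compose_example() noexcept {
  InstId packed = BaseInst::compose_arm_inst_id(42u, arm::CondCode::kEQ);
  ASMJIT_ASSERT(BaseInst::extract_real_id(packed) == 42u);
  ASMJIT_ASSERT(BaseInst::extract_arm_cond_code(packed) == arm::CondCode::kEQ);
}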
@@ -481,15 +481,15 @@ enum class OpRWFlags : uint32_t {
//! this is used by vector load and store instructions that can load or store multiple registers at once.
kConsecutive = 0x00000008u,
//! The `extendByteMask()` represents a zero extension.
//! The `extend_byte_mask()` represents a zero extension.
kZExt = 0x00000010u,
//! The register must be assigned a unique physical ID, which cannot be assigned to any other register.
kUnique = 0x00000080u,
//! Register operand must use \ref OpRWInfo::physId().
//! Register operand must use \ref OpRWInfo::phys_id().
kRegPhysId = 0x00000100u,
//! Base register of a memory operand must use \ref OpRWInfo::physId().
//! Base register of a memory operand must use \ref OpRWInfo::phys_id().
kMemPhysId = 0x00000200u,
//! This memory operand is only used to encode registers and doesn't access memory.
@@ -533,21 +533,21 @@ struct OpRWInfo {
//! \{
//! Read/Write flags.
OpRWFlags _opFlags;
OpRWFlags _op_flags;
//! Physical register index, if required.
uint8_t _physId;
uint8_t _phys_id;
//! Size of a possible memory operand that can replace a register operand.
uint8_t _rmSize;
uint8_t _rm_size;
//! If non-zero, then this is a consecutive lead register, and the value describes how many registers follow.
uint8_t _consecutiveLeadCount;
uint8_t _consecutive_lead_count;
//! Reserved for future use.
uint8_t _reserved[1];
//! Read bit-mask where each bit represents one byte read from Reg/Mem.
uint64_t _readByteMask;
uint64_t _read_byte_mask;
//! Write bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _writeByteMask;
uint64_t _write_byte_mask;
//! Zero/Sign extend bit-mask where each bit represents one byte written to Reg/Mem.
uint64_t _extendByteMask;
uint64_t _extend_byte_mask;
//! \}
@@ -557,24 +557,23 @@ struct OpRWInfo {
//! Resets this operand information to all zeros.
ASMJIT_INLINE_NODEBUG void reset() noexcept { *this = OpRWInfo{}; }
//! Resets this operand info (resets all members) and set common information
//! to the given `opFlags`, `regSize`, and possibly `physId`.
inline void reset(OpRWFlags opFlags, uint32_t regSize, uint32_t physId = Reg::kIdBad) noexcept {
_opFlags = opFlags;
_physId = uint8_t(physId);
_rmSize = Support::test(opFlags, OpRWFlags::kRegMem) ? uint8_t(regSize) : uint8_t(0);
_consecutiveLeadCount = 0;
_resetReserved();
//! Resets this operand info (resets all members) and set common information to the given `op_flags`,
//! `register_size`, and possibly `phys_id`.
inline void reset(OpRWFlags op_flags, uint32_t register_size, uint32_t phys_id = Reg::kIdBad) noexcept {
_op_flags = op_flags;
_phys_id = uint8_t(phys_id);
_rm_size = Support::test(op_flags, OpRWFlags::kRegMem) ? uint8_t(register_size) : uint8_t(0);
_consecutive_lead_count = 0;
_reset_reserved();
uint64_t mask = Support::lsbMask<uint64_t>(Support::min<uint32_t>(regSize, 64));
_readByteMask = Support::test(opFlags, OpRWFlags::kRead) ? mask : uint64_t(0);
_writeByteMask = Support::test(opFlags, OpRWFlags::kWrite) ? mask : uint64_t(0);
_extendByteMask = 0;
uint64_t mask = Support::lsb_mask<uint64_t>(Support::min<uint32_t>(register_size, 64));
_read_byte_mask = Support::test(op_flags, OpRWFlags::kRead) ? mask : uint64_t(0);
_write_byte_mask = Support::test(op_flags, OpRWFlags::kWrite) ? mask : uint64_t(0);
_extend_byte_mask = 0;
}
ASMJIT_INLINE_NODEBUG void _resetReserved() noexcept {
_reserved[0] = 0;
ASMJIT_INLINE_NODEBUG void _reset_reserved() noexcept {
_reserved[0] = uint8_t(0);
}
//! \}
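// Editor's note: a small worked example (mine, not from the patch) of the byte
// masks above. Each bit represents one byte of the operand, so resetting a 4-byte
// write-only operand yields write_byte_mask() == 0x0F and read_byte_mask() == 0.
static inline void byte_mask_example(OpRWInfo& op) noexcept {
  op.reset(OpRWFlags::kWrite, 4u);           // 4-byte register, write-only.
  ASMJIT_ASSERT(op.write_byte_mask() == 0x0Fu);
  ASMJIT_ASSERT(op.read_byte_mask() == 0u);
}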
@@ -584,56 +583,56 @@ struct OpRWInfo {
//! Returns operand flags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG OpRWFlags opFlags() const noexcept { return _opFlags; }
ASMJIT_INLINE_NODEBUG OpRWFlags op_flags() const noexcept { return _op_flags; }
//! Tests whether operand flags contain the given `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasOpFlag(OpRWFlags flag) const noexcept { return Support::test(_opFlags, flag); }
ASMJIT_INLINE_NODEBUG bool has_op_flag(OpRWFlags flag) const noexcept { return Support::test(_op_flags, flag); }
//! Adds the given `flags` to operand flags.
ASMJIT_INLINE_NODEBUG void addOpFlags(OpRWFlags flags) noexcept { _opFlags |= flags; }
ASMJIT_INLINE_NODEBUG void add_op_flags(OpRWFlags flags) noexcept { _op_flags |= flags; }
//! Removes the given `flags` from operand flags.
ASMJIT_INLINE_NODEBUG void clearOpFlags(OpRWFlags flags) noexcept { _opFlags &= ~flags; }
ASMJIT_INLINE_NODEBUG void clear_op_flags(OpRWFlags flags) noexcept { _op_flags &= ~flags; }
//! Tests whether this operand is read from.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRead() const noexcept { return hasOpFlag(OpRWFlags::kRead); }
ASMJIT_INLINE_NODEBUG bool is_read() const noexcept { return has_op_flag(OpRWFlags::kRead); }
//! Tests whether this operand is written to.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWrite() const noexcept { return hasOpFlag(OpRWFlags::kWrite); }
ASMJIT_INLINE_NODEBUG bool is_write() const noexcept { return has_op_flag(OpRWFlags::kWrite); }
//! Tests whether this operand is both read and write.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadWrite() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRW; }
ASMJIT_INLINE_NODEBUG bool is_read_write() const noexcept { return (_op_flags & OpRWFlags::kRW) == OpRWFlags::kRW; }
//! Tests whether this operand is read only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isReadOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kRead; }
ASMJIT_INLINE_NODEBUG bool is_read_only() const noexcept { return (_op_flags & OpRWFlags::kRW) == OpRWFlags::kRead; }
//! Tests whether this operand is write only.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kRW) == OpRWFlags::kWrite; }
ASMJIT_INLINE_NODEBUG bool is_write_only() const noexcept { return (_op_flags & OpRWFlags::kRW) == OpRWFlags::kWrite; }
//! Returns the count of consecutive registers that follow this lead register (zero if not a consecutive lead).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t consecutiveLeadCount() const noexcept { return _consecutiveLeadCount; }
ASMJIT_INLINE_NODEBUG uint32_t consecutive_lead_count() const noexcept { return _consecutive_lead_count; }
//! Tests whether this operand is Reg/Mem.
//!
//! Reg/Mem operands can use either register or memory.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isRm() const noexcept { return hasOpFlag(OpRWFlags::kRegMem); }
ASMJIT_INLINE_NODEBUG bool is_rm() const noexcept { return has_op_flag(OpRWFlags::kRegMem); }
//! Tests whether the operand will be zero extended.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isZExt() const noexcept { return hasOpFlag(OpRWFlags::kZExt); }
ASMJIT_INLINE_NODEBUG bool is_zext() const noexcept { return has_op_flag(OpRWFlags::kZExt); }
//! Tests whether the operand must have allocated a unique physical id that cannot be shared with other register
//! operands.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isUnique() const noexcept { return hasOpFlag(OpRWFlags::kUnique); }
ASMJIT_INLINE_NODEBUG bool is_unique() const noexcept { return has_op_flag(OpRWFlags::kUnique); }
//! \}
@@ -643,63 +642,63 @@ struct OpRWInfo {
//! Tests whether this is a fake memory operand, which is only used, because of encoding. Fake memory operands do
//! not access any memory, they are only used to encode registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemFake() const noexcept { return hasOpFlag(OpRWFlags::kMemFake); }
ASMJIT_INLINE_NODEBUG bool is_mem_fake() const noexcept { return has_op_flag(OpRWFlags::kMemFake); }
//! Tests whether the instruction's memory BASE register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRW); }
ASMJIT_INLINE_NODEBUG bool is_mem_base_used() const noexcept { return has_op_flag(OpRWFlags::kMemBaseRW); }
//! Tests whether the instruction reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseRead() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseRead); }
ASMJIT_INLINE_NODEBUG bool is_mem_base_read() const noexcept { return has_op_flag(OpRWFlags::kMemBaseRead); }
//! Tests whether the instruction writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemBaseWrite); }
ASMJIT_INLINE_NODEBUG bool is_mem_base_write() const noexcept { return has_op_flag(OpRWFlags::kMemBaseWrite); }
//! Tests whether the instruction reads and writes from/to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; }
ASMJIT_INLINE_NODEBUG bool is_mem_base_read_write() const noexcept { return (_op_flags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRW; }
//! Tests whether the instruction only reads from its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; }
ASMJIT_INLINE_NODEBUG bool is_mem_base_read_only() const noexcept { return (_op_flags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseRead; }
//! Tests whether the instruction only writes to its BASE registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBaseWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; }
ASMJIT_INLINE_NODEBUG bool is_mem_base_write_only() const noexcept { return (_op_flags & OpRWFlags::kMemBaseRW) == OpRWFlags::kMemBaseWrite; }
//! Tests whether the instruction modifies the BASE register before it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePreModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePreModify); }
ASMJIT_INLINE_NODEBUG bool is_mem_base_pre_modify() const noexcept { return has_op_flag(OpRWFlags::kMemBasePreModify); }
//! Tests whether the instruction modifies the BASE register after it uses it to calculate the target address.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemBasePostModify() const noexcept { return hasOpFlag(OpRWFlags::kMemBasePostModify); }
ASMJIT_INLINE_NODEBUG bool is_mem_base_post_modify() const noexcept { return has_op_flag(OpRWFlags::kMemBasePostModify); }
//! Tests whether the instruction's memory INDEX register is used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexUsed() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRW); }
ASMJIT_INLINE_NODEBUG bool is_mem_index_used() const noexcept { return has_op_flag(OpRWFlags::kMemIndexRW); }
//! Tests whether the instruction reads the INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexRead() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexRead); }
ASMJIT_INLINE_NODEBUG bool is_mem_index_read() const noexcept { return has_op_flag(OpRWFlags::kMemIndexRead); }
//! Tests whether the instruction writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWrite() const noexcept { return hasOpFlag(OpRWFlags::kMemIndexWrite); }
ASMJIT_INLINE_NODEBUG bool is_mem_index_write() const noexcept { return has_op_flag(OpRWFlags::kMemIndexWrite); }
//! Tests whether the instruction reads and writes from/to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadWrite() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; }
ASMJIT_INLINE_NODEBUG bool is_mem_index_read_write() const noexcept { return (_op_flags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRW; }
//! Tests whether the instruction only reads from its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexReadOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; }
ASMJIT_INLINE_NODEBUG bool is_mem_index_read_only() const noexcept { return (_op_flags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexRead; }
//! Tests whether the instruction only writes to its INDEX registers.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMemIndexWriteOnly() const noexcept { return (_opFlags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; }
ASMJIT_INLINE_NODEBUG bool is_mem_index_write_only() const noexcept { return (_op_flags & OpRWFlags::kMemIndexRW) == OpRWFlags::kMemIndexWrite; }
//! \}
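
Illustrative sketch (not part of the diff): consuming the BASE/INDEX predicates through the introspection API. The x86 backend and the `InstRWInfo::operand()` accessor are assumed here, as neither appears in this hunk.

#include <asmjit/x86.h>

using namespace asmjit;

// Query RW info of `add dword ptr [ecx+edx], eax` and inspect how the
// memory operand treats its BASE and INDEX registers.
static void inspect_mem_operand() {
  Operand operands[2] = { x86::dword_ptr(x86::ecx, x86::edx), x86::eax };

  InstRWInfo rw;
  Error err = InstAPI::query_rw_info(Arch::kX86, BaseInst(x86::Inst::kIdAdd), operands, 2, &rw);
  (void)err; // Error handling elided in this sketch.

  const OpRWInfo& mem = rw.operand(0);
  if (mem.is_mem_base_used() && mem.is_mem_base_read_only()) {
    // ECX is only read to form the address - never written back.
  }
  if (mem.is_mem_index_read()) {
    // EDX is read as part of the address calculation.
  }
}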
@@ -710,14 +709,14 @@ struct OpRWInfo {
//!
//! Returns \ref Reg::kIdBad if any register can be used.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t physId() const noexcept { return _physId; }
ASMJIT_INLINE_NODEBUG uint32_t phys_id() const noexcept { return _phys_id; }
//! Tests whether \ref physId() would return a valid physical register id.
//! Tests whether \ref phys_id() would return a valid physical register id.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasPhysId() const noexcept { return _physId != Reg::kIdBad; }
ASMJIT_INLINE_NODEBUG bool has_phys_id() const noexcept { return _phys_id != Reg::kIdBad; }
//! Sets physical register id, which would be fixed for this operand.
ASMJIT_INLINE_NODEBUG void setPhysId(uint32_t physId) noexcept { _physId = uint8_t(physId); }
ASMJIT_INLINE_NODEBUG void set_phys_id(uint32_t phys_id) noexcept { _phys_id = uint8_t(phys_id); }
//! \}
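
A hedged helper showing the intended use of `phys_id()` / `has_phys_id()`: operands with implicit register semantics (x86 MUL and friends) come back pinned to a concrete id.

#include <asmjit/core.h>

using namespace asmjit;

// Returns the physical id an operand is pinned to, or Reg::kIdBad when the
// register allocator is free to pick any register of the matching group.
static inline uint32_t pinned_register_id(const OpRWInfo& op) noexcept {
  return op.has_phys_id() ? op.phys_id() : uint32_t(Reg::kIdBad);
}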
@@ -726,10 +725,10 @@ struct OpRWInfo {
//! Returns Reg/Mem size of the operand.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmSize() const noexcept { return _rmSize; }
ASMJIT_INLINE_NODEBUG uint32_t rm_size() const noexcept { return _rm_size; }
//! Sets Reg/Mem size of the operand.
ASMJIT_INLINE_NODEBUG void setRmSize(uint32_t rmSize) noexcept { _rmSize = uint8_t(rmSize); }
ASMJIT_INLINE_NODEBUG void set_rm_size(uint32_t rm_size) noexcept { _rm_size = uint8_t(rm_size); }
//! \}
@@ -738,24 +737,24 @@ struct OpRWInfo {
//! Returns read mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t readByteMask() const noexcept { return _readByteMask; }
ASMJIT_INLINE_NODEBUG uint64_t read_byte_mask() const noexcept { return _read_byte_mask; }
//! Sets read mask.
ASMJIT_INLINE_NODEBUG void setReadByteMask(uint64_t mask) noexcept { _readByteMask = mask; }
ASMJIT_INLINE_NODEBUG void set_read_byte_mask(uint64_t mask) noexcept { _read_byte_mask = mask; }
//! Returns write mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t writeByteMask() const noexcept { return _writeByteMask; }
ASMJIT_INLINE_NODEBUG uint64_t write_byte_mask() const noexcept { return _write_byte_mask; }
//! Sets write mask.
ASMJIT_INLINE_NODEBUG void setWriteByteMask(uint64_t mask) noexcept { _writeByteMask = mask; }
ASMJIT_INLINE_NODEBUG void set_write_byte_mask(uint64_t mask) noexcept { _write_byte_mask = mask; }
//! Returns extend mask.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint64_t extendByteMask() const noexcept { return _extendByteMask; }
ASMJIT_INLINE_NODEBUG uint64_t extend_byte_mask() const noexcept { return _extend_byte_mask; }
//! Sets extend mask.
ASMJIT_INLINE_NODEBUG void setExtendByteMask(uint64_t mask) noexcept { _extendByteMask = mask; }
ASMJIT_INLINE_NODEBUG void set_extend_byte_mask(uint64_t mask) noexcept { _extend_byte_mask = mask; }
//! \}
};
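
Not from the diff: a tiny debugging helper over the three masks. Each bit addresses one byte of the operand (bit 0 = least significant byte); on X86_64, for example, a 32-bit GP write would typically report the upper four bytes in the extend mask, as they are zero-extended rather than explicitly written.

#include <asmjit/core.h>
#include <cstdio>

using namespace asmjit;

// Prints which bytes an operand reads, writes, and implicitly extends.
static void dump_byte_masks(const OpRWInfo& op) noexcept {
  std::printf("read=%016llx write=%016llx extend=%016llx\n",
              (unsigned long long)op.read_byte_mask(),
              (unsigned long long)op.write_byte_mask(),
              (unsigned long long)op.extend_byte_mask());
}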
@@ -778,19 +777,19 @@ struct InstRWInfo {
//! \{
//! Instruction flags (there are no flags at the moment; this field is reserved).
InstRWFlags _instFlags;
InstRWFlags _inst_flags;
//! CPU flags read.
CpuRWFlags _readFlags;
CpuRWFlags _read_flags;
//! CPU flags written.
CpuRWFlags _writeFlags;
CpuRWFlags _write_flags;
//! Count of operands.
uint8_t _opCount;
uint8_t _op_count;
//! CPU feature required for replacing register operand with memory operand.
uint8_t _rmFeature;
uint8_t _rm_feature;
//! Reserved for future use.
uint8_t _reserved[18];
//! Read/Write info of extra register (rep{} or kz{}).
OpRWInfo _extraReg;
OpRWInfo _extra_reg;
//! Read/Write info of instruction operands.
OpRWInfo _operands[Globals::kMaxOpCount];
@@ -809,15 +808,15 @@ struct InstRWInfo {
//! Returns flags associated with the instruction, see \ref InstRWFlags.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG InstRWFlags instFlags() const noexcept { return _instFlags; }
ASMJIT_INLINE_NODEBUG InstRWFlags inst_flags() const noexcept { return _inst_flags; }
//! Tests whether the instruction flags contain `flag`.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool hasInstFlag(InstRWFlags flag) const noexcept { return Support::test(_instFlags, flag); }
ASMJIT_INLINE_NODEBUG bool has_inst_flag(InstRWFlags flag) const noexcept { return Support::test(_inst_flags, flag); }
//! Tests whether the instruction flags contain \ref InstRWFlags::kMovOp.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG bool isMovOp() const noexcept { return hasInstFlag(InstRWFlags::kMovOp); }
ASMJIT_INLINE_NODEBUG bool is_mov_op() const noexcept { return has_inst_flag(InstRWFlags::kMovOp); }
//! \}
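
A sketch of what `is_mov_op()` enables: a copy-coalescing pass can recognize plain register moves without hardcoding per-architecture instruction ids (assuming the flag is set exactly for such mov-like instructions).

#include <asmjit/core.h>

using namespace asmjit;

// A two-operand instruction flagged kMovOp is a candidate for copy
// coalescing / mov elimination.
static inline bool is_coalescable_copy(const InstRWInfo& rw) noexcept {
  return rw.is_mov_op() && rw.op_count() == 2;
}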
@@ -826,11 +825,11 @@ struct InstRWInfo {
//! Returns a mask of CPU flags read.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags readFlags() const noexcept { return _readFlags; }
ASMJIT_INLINE_NODEBUG CpuRWFlags read_flags() const noexcept { return _read_flags; }
//! Returns a mask of CPU flags written.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG CpuRWFlags writeFlags() const noexcept { return _writeFlags; }
ASMJIT_INLINE_NODEBUG CpuRWFlags write_flags() const noexcept { return _write_flags; }
//! \}
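
Sketch: a conservative flags-dependence check built on the two masks, useful when deciding whether two instructions can be reordered.

#include <asmjit/core.h>
#include <cstdint>

using namespace asmjit;

// True if neither instruction reads or rewrites CPU flags the other writes.
static inline bool flags_independent(const InstRWInfo& a, const InstRWInfo& b) noexcept {
  uint64_t ar = uint64_t(a.read_flags()),  aw = uint64_t(a.write_flags());
  uint64_t br = uint64_t(b.read_flags()),  bw = uint64_t(b.write_flags());
  return (ar & bw) == 0 && (br & aw) == 0 && (aw & bw) == 0;
}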
@@ -848,16 +847,16 @@ struct InstRWInfo {
//! VPSLLDQ instruction only supports `vpslldq reg, reg, imm` combination on AVX/AVX2 capable CPUs and requires
//! AVX-512 for `vpslldq reg, mem, imm` combination.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t rmFeature() const noexcept { return _rmFeature; }
ASMJIT_INLINE_NODEBUG uint32_t rm_feature() const noexcept { return _rm_feature; }
//! \}
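
Hedged sketch of the intended query: replace a register operand with a memory operand only when no feature gate applies (0) or the target CPU has the feature. The exact `CpuFeatures` query spelling is an assumption here.

#include <asmjit/core.h>

using namespace asmjit;

// Can a register operand of this instruction be replaced by a memory
// operand on the given CPU? (CpuFeatures::has() spelling assumed.)
static inline bool can_use_mem_operand(const InstRWInfo& rw, const CpuFeatures& cpu) noexcept {
  uint32_t feature = rw.rm_feature();
  return feature == 0 || cpu.has(feature);
}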
//! \name Operand Read/Write Information
//! \{
//! Returns RW information of extra register operand (extraReg).
//! Returns RW information of extra register operand (extra_reg).
[[nodiscard]]
ASMJIT_INLINE_NODEBUG const OpRWInfo& extraReg() const noexcept { return _extraReg; }
ASMJIT_INLINE_NODEBUG const OpRWInfo& extra_reg() const noexcept { return _extra_reg; }
//! Returns RW information of all instruction's operands.
[[nodiscard]]
@@ -872,7 +871,7 @@ struct InstRWInfo {
//! Returns the number of operands this instruction has.
[[nodiscard]]
ASMJIT_INLINE_NODEBUG uint32_t opCount() const noexcept { return _opCount; }
ASMJIT_INLINE_NODEBUG uint32_t op_count() const noexcept { return _op_count; }
//! \}
};
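
End-to-end sketch tying the struct together: query `xor eax, eax` and walk its operands. `InstRWInfo::operand()` and `OpRWInfo::is_read()` / `is_write()` are assumed spellings; they are not shown in this diff.

#include <asmjit/x86.h>
#include <cstdio>

using namespace asmjit;

static void dump_rw_info() {
  Operand ops[2] = { x86::eax, x86::eax };

  InstRWInfo rw;
  Error err = InstAPI::query_rw_info(Arch::kX86, BaseInst(x86::Inst::kIdXor), ops, 2, &rw);
  (void)err; // Error handling elided.

  for (uint32_t i = 0; i < rw.op_count(); i++) {
    const OpRWInfo& op = rw.operand(i);
    std::printf("op #%u: read=%d write=%d\n", i, int(op.is_read()), int(op.is_write()));
  }
}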
@@ -890,37 +889,32 @@ ASMJIT_DEFINE_ENUM_FLAGS(ValidationFlags)
namespace InstAPI {
#ifndef ASMJIT_NO_TEXT
//! Appends the name of the instruction specified by `instId` and `options` into the `output` string.
//! Appends the name of the instruction specified by `inst_id` and `options` into the `output` string.
//!
//! \note Instruction options only affect the instruction prefix & suffix; other options are ignored.
//! If `instOptions` is zero then only raw instruction name (without any additional text) will be appended.
ASMJIT_API Error instIdToString(Arch arch, InstId instId, InstStringifyOptions options, String& output) noexcept;
[[deprecated("Use `instIdToString()` with `InstStringifyOptions` parameter")]]
static inline Error instIdToString(Arch arch, InstId instId, String& output) noexcept {
return instIdToString(arch, instId, InstStringifyOptions::kNone, output);
}
//! If `inst_options` is zero then only raw instruction name (without any additional text) will be appended.
ASMJIT_API Error inst_id_to_string(Arch arch, InstId inst_id, InstStringifyOptions options, String& output) noexcept;
//! Parses an instruction name in the given string `s`. Length is specified by the `len` argument, which can be
//! `SIZE_MAX` if `s` is known to be null terminated.
//!
//! Returns the parsed instruction id or \ref BaseInst::kIdNone if no such instruction exists.
[[nodiscard]]
ASMJIT_API InstId stringToInstId(Arch arch, const char* s, size_t len) noexcept;
ASMJIT_API InstId string_to_inst_id(Arch arch, const char* s, size_t len) noexcept;
#endif // !ASMJIT_NO_TEXT
#ifndef ASMJIT_NO_VALIDATION
//! Validates the given instruction considering the given `validationFlags`.
//! Validates the given instruction considering the given `validation_flags`.
[[nodiscard]]
ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, ValidationFlags validationFlags = ValidationFlags::kNone) noexcept;
ASMJIT_API Error validate(Arch arch, const BaseInst& inst, const Operand_* operands, size_t op_count, ValidationFlags validation_flags = ValidationFlags::kNone) noexcept;
#endif // !ASMJIT_NO_VALIDATION
#ifndef ASMJIT_NO_INTROSPECTION
//! Gets Read/Write information of the given instruction.
ASMJIT_API Error queryRWInfo(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, InstRWInfo* out) noexcept;
ASMJIT_API Error query_rw_info(Arch arch, const BaseInst& inst, const Operand_* operands, size_t op_count, InstRWInfo* out) noexcept;
//! Gets CPU features required by the given instruction.
ASMJIT_API Error queryFeatures(Arch arch, const BaseInst& inst, const Operand_* operands, size_t opCount, CpuFeatures* out) noexcept;
ASMJIT_API Error query_features(Arch arch, const BaseInst& inst, const Operand_* operands, size_t op_count, CpuFeatures* out) noexcept;
#endif // !ASMJIT_NO_INTROSPECTION
} // {InstAPI}
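
Sketch of the renamed text API round-trip (guarded by !ASMJIT_NO_TEXT in real builds):

#include <asmjit/x86.h>
#include <cstdint>

using namespace asmjit;

static void name_round_trip() {
  // Parse a mnemonic; SIZE_MAX means `s` is null-terminated.
  InstId id = InstAPI::string_to_inst_id(Arch::kX64, "vpslldq", SIZE_MAX);
  if (id == BaseInst::kIdNone)
    return; // Unknown mnemonic.

  // Stringify it back; with kNone only the raw name is appended.
  String name;
  Error err = InstAPI::inst_id_to_string(Arch::kX64, id, InstStringifyOptions::kNone, name);
  (void)err;
}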

View File

@@ -12,65 +12,65 @@ namespace InstNameUtils {
static constexpr uint32_t kBufferSize = 32;
static ASMJIT_INLINE_CONSTEXPR char decode5BitChar(uint32_t c) noexcept {
static ASMJIT_INLINE_CONSTEXPR char decode_5bit_char(uint32_t c) noexcept {
uint32_t base = c <= 26 ? uint32_t('a') - 1u : uint32_t('0') - 27u;
return char(base + c);
}
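
The 5-bit alphabet decoded above: 0 terminates, 1..26 map to 'a'..'z', and 27..31 map to '0'..'4'. The arithmetic checks out:

// c <= 26: base = 'a' - 1, so c = 1 -> 'a' and c = 26 -> 'z'.
static_assert('a' - 1 + 1  == 'a', "c=1  decodes to 'a'");
static_assert('a' - 1 + 26 == 'z', "c=26 decodes to 'z'");
// c >= 27: base = '0' - 27, so c = 27 -> '0' and c = 31 -> '4'.
static_assert('0' - 27 + 27 == '0', "c=27 decodes to '0'");
static_assert('0' - 27 + 31 == '4', "c=31 decodes to '4'");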
static ASMJIT_INLINE size_t decodeToBuffer(char nameOut[kBufferSize], uint32_t nameValue, InstStringifyOptions options, const char* stringTable) noexcept {
static ASMJIT_INLINE size_t decode_to_buffer(char name_out[kBufferSize], uint32_t name_value, InstStringifyOptions options, const char* string_table) noexcept {
size_t i;
if (nameValue & 0x80000000u) {
if (name_value & 0x80000000u) {
// Small string of 5-bit characters.
//
// NOTE: Small string optimization never provides additional
// alias formatting, so we don't have to consider `options`.
for (i = 0; i < 6; i++, nameValue >>= 5) {
uint32_t c = nameValue & 0x1F;
for (i = 0; i < 6; i++, name_value >>= 5) {
uint32_t c = name_value & 0x1F;
if (c == 0)
break;
nameOut[i] = decode5BitChar(c);
name_out[i] = decode_5bit_char(c);
}
return i;
}
else {
size_t prefixBase = nameValue & 0xFFFu;
size_t prefixSize = (nameValue >> 12) & 0xFu;
size_t prefix_base = name_value & 0xFFFu;
size_t prefix_size = (name_value >> 12) & 0xFu;
size_t suffixBase = (nameValue >> 16) & 0xFFFu;
size_t suffixSize = (nameValue >> 28) & 0x7u;
size_t suffix_base = (name_value >> 16) & 0xFFFu;
size_t suffix_size = (name_value >> 28) & 0x7u;
if (Support::test(options, InstStringifyOptions::kAliases) && suffixBase == 0xFFFu) {
if (Support::test(options, InstStringifyOptions::kAliases) && suffix_base == 0xFFFu) {
// Alias formatting immediately follows the instruction name in the string table.
// The first character specifies the length and then string data follows.
prefixBase += prefixSize;
prefixSize = uint8_t(stringTable[prefixBase]);
ASMJIT_ASSERT(prefixSize <= kBufferSize);
prefix_base += prefix_size;
prefix_size = uint8_t(string_table[prefix_base]);
ASMJIT_ASSERT(prefix_size <= kBufferSize);
prefixBase += 1; // Skip the byte that specifies the length of a formatted alias.
prefix_base += 1; // Skip the byte that specifies the length of a formatted alias.
}
for (i = 0; i < prefixSize; i++) {
nameOut[i] = stringTable[prefixBase + i];
for (i = 0; i < prefix_size; i++) {
name_out[i] = string_table[prefix_base + i];
}
char* suffixOut = nameOut + prefixSize;
for (i = 0; i < suffixSize; i++) {
suffixOut[i] = stringTable[suffixBase + i];
char* suffix_out = name_out + prefix_size;
for (i = 0; i < suffix_size; i++) {
suffix_out[i] = string_table[suffix_base + i];
}
return prefixSize + suffixSize;
return prefix_size + suffix_size;
}
}
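
For reference, the packed `name_value` layout as read off `decode_to_buffer()`. These helpers mirror the shifts above; they are documentation, not part of the diff.

#include <cstdint>

constexpr bool     is_inline_name(uint32_t v) noexcept { return (v & 0x80000000u) != 0; } // six 5-bit chars inline
constexpr uint32_t prefix_base(uint32_t v)    noexcept { return v & 0xFFFu; }             // bits 0..11
constexpr uint32_t prefix_size(uint32_t v)    noexcept { return (v >> 12) & 0xFu; }       // bits 12..15
constexpr uint32_t suffix_base(uint32_t v)    noexcept { return (v >> 16) & 0xFFFu; }     // bits 16..27 (0xFFF = alias data)
constexpr uint32_t suffix_size(uint32_t v)    noexcept { return (v >> 28) & 0x7u; }       // bits 28..30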
Error decode(uint32_t nameValue, InstStringifyOptions options, const char* stringTable, String& output) noexcept {
char nameData[kBufferSize];
size_t nameSize = decodeToBuffer(nameData, nameValue, options, stringTable);
Error decode(uint32_t name_value, InstStringifyOptions options, const char* string_table, String& output) noexcept {
char name_data[kBufferSize];
size_t name_size = decode_to_buffer(name_data, name_value, options, string_table);
return output.append(nameData, nameSize);
return output.append(name_data, name_size);
}
InstId findInstruction(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, const InstNameIndex& nameIndex) noexcept {
InstId find_instruction(const char* s, size_t len, const uint32_t* name_table, const char* string_table, const InstNameIndex& name_index) noexcept {
ASMJIT_ASSERT(s != nullptr);
ASMJIT_ASSERT(len > 0u);
@@ -79,48 +79,48 @@ InstId findInstruction(const char* s, size_t len, const uint32_t* nameTable, con
return BaseInst::kIdNone;
}
size_t base = nameIndex.data[prefix].start;
size_t end = nameIndex.data[prefix].end;
size_t base = name_index.data[prefix].start;
size_t end = name_index.data[prefix].end;
if (ASMJIT_UNLIKELY(!base)) {
return BaseInst::kIdNone;
}
char nameData[kBufferSize];
char name_data[kBufferSize];
for (size_t lim = end - base; lim != 0; lim >>= 1) {
size_t instId = base + (lim >> 1);
size_t nameSize = decodeToBuffer(nameData, nameTable[instId], InstStringifyOptions::kNone, stringTable);
size_t inst_id = base + (lim >> 1);
size_t name_size = decode_to_buffer(name_data, name_table[inst_id], InstStringifyOptions::kNone, string_table);
int result = Support::compareStringViews(s, len, nameData, nameSize);
int result = Support::compare_string_views(s, len, name_data, name_size);
if (result < 0) {
continue;
}
if (result > 0) {
base = instId + 1;
base = inst_id + 1;
lim--;
continue;
}
return InstId(instId);
return InstId(inst_id);
}
return BaseInst::kIdNone;
}
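
The loop above is the classic shrinking-window binary search (as in BSD bsearch): `result < 0` just halves the window, while `result > 0` moves `base` past the probe. The same idiom over a plain sorted array:

#include <cstddef>
#include <cstdint>

// Returns the index of `key` in sorted `data[0..count)`, or SIZE_MAX.
static inline size_t bsearch_window(const int* data, size_t count, int key) noexcept {
  size_t base = 0;
  for (size_t lim = count; lim != 0; lim >>= 1) {
    size_t mid = base + (lim >> 1);
    if (key < data[mid])
      continue;          // Keep the lower half; `base` is unchanged.
    if (key > data[mid]) {
      base = mid + 1;    // Keep the upper half.
      lim--;
      continue;
    }
    return mid;
  }
  return SIZE_MAX;
}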
uint32_t findAlias(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, uint32_t aliasNameCount) noexcept {
uint32_t find_alias(const char* s, size_t len, const uint32_t* name_table, const char* string_table, uint32_t alias_name_count) noexcept {
ASMJIT_ASSERT(s != nullptr);
ASMJIT_ASSERT(len > 0u);
size_t base = 0;
char nameData[kBufferSize];
char name_data[kBufferSize];
for (size_t lim = size_t(aliasNameCount) - base; lim != 0; lim >>= 1) {
for (size_t lim = size_t(alias_name_count) - base; lim != 0; lim >>= 1) {
size_t index = base + (lim >> 1);
size_t nameSize = decodeToBuffer(nameData, nameTable[index], InstStringifyOptions::kNone, stringTable);
size_t name_size = decode_to_buffer(name_data, name_table[index], InstStringifyOptions::kNone, string_table);
int result = Support::compareStringViews(s, len, nameData, nameSize);
int result = Support::compare_string_views(s, len, name_data, name_size);
if (result < 0) {
continue;
}

View File

@@ -22,14 +22,14 @@ struct InstNameIndex {
};
Span data[26];
uint16_t maxNameLength;
uint16_t max_name_length;
};
namespace InstNameUtils {
Error decode(uint32_t nameValue, InstStringifyOptions options, const char* stringTable, String& output) noexcept;
InstId findInstruction(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, const InstNameIndex& nameIndex) noexcept;
uint32_t findAlias(const char* s, size_t len, const uint32_t* nameTable, const char* stringTable, uint32_t aliasNameCount) noexcept;
Error decode(uint32_t name_value, InstStringifyOptions options, const char* string_table, String& output) noexcept;
InstId find_instruction(const char* s, size_t len, const uint32_t* name_table, const char* string_table, const InstNameIndex& name_index) noexcept;
uint32_t find_alias(const char* s, size_t len, const uint32_t* name_table, const char* string_table, uint32_t alias_name_count) noexcept;
} // {InstNameUtils}
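
Not part of the diff: the shape of the lookup this header enables. The 26 spans bucket the sorted name table by first letter, so `find_instruction()` only binary-searches one bucket, and `max_name_length` lets callers reject overlong inputs before searching. A hypothetical standalone mirror of the struct:

#include <cstdint>

struct FirstLetterIndex {
  struct Span { uint32_t start, end; }; // [start, end) into the name table.
  Span data[26];
  uint16_t max_name_length;

  Span bucket_of(char first) const noexcept {
    return (first >= 'a' && first <= 'z') ? data[first - 'a'] : Span{0u, 0u};
  }
};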

File diff suppressed because it is too large

Some files were not shown because too many files have changed in this diff