Mirror of https://github.com/asmjit/asmjit.git (synced 2025-12-16 20:17:05 +03:00)
[abi] Improved Zone to use adaptive size of blocks
@@ -16,7 +16,7 @@
#define ASMJIT_LIBRARY_MAKE_VERSION(major, minor, patch) ((major << 16) | (minor << 8) | (patch))
//! AsmJit library version, see \ref ASMJIT_LIBRARY_MAKE_VERSION for a version format reference.
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 13, 0)
#define ASMJIT_LIBRARY_VERSION ASMJIT_LIBRARY_MAKE_VERSION(1, 14, 0)
//! \def ASMJIT_ABI_NAMESPACE
//!
@@ -27,7 +27,7 @@
//! AsmJit default, which makes it possible to use multiple AsmJit libraries within a single project, totally
//! controlled by users. This is useful especially in cases in which some of such library comes from third party.
#if !defined(ASMJIT_ABI_NAMESPACE)
#define ASMJIT_ABI_NAMESPACE _abi_1_13
#define ASMJIT_ABI_NAMESPACE v1_14
#endif // !ASMJIT_ABI_NAMESPACE
//! \}
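Note: a minimal downstream sketch (not part of this commit; the include path is an assumption) showing how the packed version format above can be used to gate on the 1.14 ABI:

    #include <asmjit/core.h>

    // ASMJIT_LIBRARY_MAKE_VERSION(1, 14, 0) packs to (1 << 16) | (14 << 8) | 0 == 0x010E00.
    #if ASMJIT_LIBRARY_VERSION >= ASMJIT_LIBRARY_MAKE_VERSION(1, 14, 0)
      // Safe to rely on the v1_14 ABI namespace and the adaptive Zone block sizing below.
    #endif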
@@ -45,9 +45,9 @@ static void BaseBuilder_deletePasses(BaseBuilder* self) noexcept {
BaseBuilder::BaseBuilder() noexcept
: BaseEmitter(EmitterType::kBuilder),
_codeZone(32768 - Zone::kBlockOverhead),
_dataZone(16384 - Zone::kBlockOverhead),
_passZone(65536 - Zone::kBlockOverhead),
_codeZone(32u * 1024u),
_dataZone(16u * 1024u),
_passZone(64u * 1024u),
_allocator(&_codeZone) {}
BaseBuilder::~BaseBuilder() noexcept {
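Note: the constructors above drop the old "N - Zone::kBlockOverhead" convention; callers now pass the plain minimum block size and Zone accounts for its own header internally. A minimal standalone sketch of the new convention (include path assumed):

    #include <asmjit/core.h>
    using namespace asmjit;

    int main() {
      Zone codeZone(32u * 1024u);    // minimum block size of 32 KiB, overhead handled by Zone itself
      Zone dataZone(16u * 1024u);    // previously: Zone(16384 - Zone::kBlockOverhead)
      void* p = codeZone.alloc(64);  // plain bump allocation from the first heap block
      return p ? 0 : 1;
    }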
@@ -123,7 +123,7 @@ CodeHolder::CodeHolder(const Support::Temporary* temporary) noexcept
_baseAddress(Globals::kNoBaseAddress),
_logger(nullptr),
_errorHandler(nullptr),
_zone(16384 - Zone::kBlockOverhead, 1, temporary),
_zone(16u * 1024u, 1, temporary),
_allocator(&_zone),
_unresolvedLinkCount(0),
_addressTableSection(nullptr) {}
@@ -51,7 +51,7 @@ public:
BaseCompiler::BaseCompiler() noexcept
: BaseBuilder(),
_func(nullptr),
_vRegZone(4096 - Zone::kBlockOverhead),
_vRegZone(4u * 1024u),
_vRegArray(),
_constPools { nullptr, nullptr } {
_emitterType = EmitterType::kCompiler;
@@ -240,7 +240,7 @@ void ConstPool::fill(void* dst) const noexcept {
#if defined(ASMJIT_TEST)
UNIT(const_pool) {
Zone zone(32384 - Zone::kBlockOverhead);
Zone zone(32u * 1024u);
ConstPool pool(&zone);
uint32_t i;
@@ -167,7 +167,7 @@ public:
}
static inline Node* _newNode(Zone* zone, const void* data, size_t size, size_t offset, bool shared) noexcept {
Node* node = zone->allocT<Node>(sizeof(Node) + size);
Node* node = zone->allocT<Node>(Support::alignUp(sizeof(Node) + size, alignof(Node)));
if (ASMJIT_UNLIKELY(!node)) return nullptr;
node = new(Support::PlacementNew{node}) Node(offset, shared);
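Note: the added Support::alignUp rounds the node-plus-payload size up to a multiple of alignof(Node), so a bump allocator handing out these nodes back to back cannot leave the next Node misaligned. A standalone sketch of what alignUp is assumed to compute (the example sizes are hypothetical):

    #include <cstddef>
    #include <cstdio>

    // Round x up to the next multiple of a power-of-two alignment.
    static size_t alignUp(size_t x, size_t alignment) {
      return (x + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      // e.g. a 24-byte Node followed by a 5-byte payload, kept 8-byte aligned for the next Node:
      std::printf("%zu\n", alignUp(24 + 5, 8));  // prints 32
    }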
@@ -1269,7 +1269,7 @@ public:
Random _rng;
explicit JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept
: _zone(1024 * 1024),
: _zone(1024u * 1024u),
_heap(&_zone),
_allocator(params),
_rng(0x123456789u) {}
@@ -544,7 +544,7 @@ namespace Internal {
inline T addOverflowFallback(T x, T y, FastUInt8* of) noexcept {
typedef typename std::make_unsigned<T>::type U;
U result = U(x) + U(y);
U result = U(U(x) + U(y));
*of = FastUInt8(*of | FastUInt8(isUnsigned<T>() ? result < U(x) : T((U(x) ^ ~U(y)) & (U(x) ^ result)) < 0));
return T(result);
}
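Note: the extra U(...) cast only makes the narrowing back from the promoted sum explicit; the detection logic is unchanged: unsigned overflow shows up as wraparound (result < x), and signed overflow shows up when both operands share a sign that the result does not. A standalone illustration of the signed case (not the library code):

    #include <cstdint>
    #include <cstdio>

    // Overflow happened iff x and y have the same sign but the wrapped sum has a different one.
    static bool addOverflows(int32_t x, int32_t y) {
      uint32_t ux = uint32_t(x), uy = uint32_t(y);
      uint32_t ur = ux + uy;                       // well-defined wraparound
      return int32_t((ux ^ ~uy) & (ux ^ ur)) < 0;  // sign bit set -> overflow
    }

    int main() {
      std::printf("%d\n", addOverflows(INT32_MAX, 1));  // 1
      std::printf("%d\n", addOverflows(-5, 3));         // 0
    }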
@@ -14,7 +14,21 @@ ASMJIT_BEGIN_NAMESPACE
// Zero size block used by `Zone` that doesn't have any memory allocated. Should be allocated in read-only memory
// and should never be modified.
const Zone::Block Zone::_zeroBlock = { nullptr, nullptr, 0 };
const Zone::Block Zone::_zeroBlock {};
static inline void Zone_assignZeroBlock(Zone* zone) noexcept {
Zone::Block* block = const_cast<Zone::Block*>(&zone->_zeroBlock);
zone->_ptr = block->data();
zone->_end = block->data();
zone->_block = block;
}
static inline void Zone_assignBlock(Zone* zone, Zone::Block* block) noexcept {
size_t alignment = zone->blockAlignment();
zone->_ptr = Support::alignUp(block->data(), alignment);
zone->_end = block->data() + block->size;
zone->_block = block;
}
// Zone - Initialization & Reset
// =============================
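Note: the shared, read-only zero block gives a freshly constructed Zone a valid _ptr/_end pair with zero remaining bytes, so the inline fast path needs no null or "first allocation" check. A condensed sketch of that pattern (names hypothetical):

    #include <cstddef>
    #include <cstdint>

    struct TinyZone {
      uint8_t* ptr;  // current position; initially points into a shared empty sentinel block
      uint8_t* end;  // end of the current block; equal to ptr for the sentinel, so nothing fits

      void* alloc(size_t size) {
        if (size <= size_t(end - ptr)) { void* p = ptr; ptr += size; return p; }
        return allocSlow(size);                    // the very first call always lands here
      }
      void* allocSlow(size_t) { return nullptr; }  // would malloc() a real block and retry
    };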
@@ -24,14 +38,17 @@ void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Tempora
ASMJIT_ASSERT(blockSize <= kMaxBlockSize);
ASMJIT_ASSERT(blockAlignment <= 64);
// Just to make the compiler happy...
constexpr size_t kBlockSizeMask = (Support::allOnes<size_t>() >> 4);
constexpr size_t kBlockAlignmentShiftMask = 0x7u;
Zone_assignZeroBlock(this);
_assignZeroBlock();
_blockSize = blockSize & kBlockSizeMask;
_isTemporary = temporary != nullptr;
_blockAlignmentShift = Support::ctz(blockAlignment) & kBlockAlignmentShiftMask;
size_t blockSizeShift = Support::bitSizeOf<size_t>() - Support::clz(blockSize);
size_t blockAlignmentShift = Support::bitSizeOf<size_t>() - Support::clz(blockAlignment | (size_t(1) << 3));
_blockAlignmentShift = uint8_t(blockAlignmentShift);
_minimumBlockSizeShift = uint8_t(blockSizeShift);
_maximumBlockSizeShift = uint8_t(25); // (1 << 25) Equals 32 MiB blocks (should be enough for all cases)
_hasStaticBlock = uint8_t(temporary != nullptr);
_reserved = uint8_t(0u);
_blockCount = size_t(temporary != nullptr);
// Setup the first [temporary] block, if necessary.
if (temporary) {
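Note: instead of storing a masked block size and a ctz-based alignment shift in bit-fields, _init now stores log2-style shifts. A quick standalone evaluation of the block-size expression above (assumes a 64-bit size_t and uses a gcc/clang builtin in place of Support::clz):

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t blockSize = 32u * 1024u;                             // e.g. the new _codeZone argument
      size_t bits = sizeof(size_t) * 8u;                          // Support::bitSizeOf<size_t>()
      size_t shift = bits - size_t(__builtin_clzll(blockSize));   // Support::clz(blockSize)
      std::printf("blockSizeShift = %zu\n", shift);               // prints 16 for a 32 KiB request
    }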
@@ -42,7 +59,8 @@ void Zone::_init(size_t blockSize, size_t blockAlignment, const Support::Tempora
ASMJIT_ASSERT(temporary->size() >= kBlockSize);
block->size = temporary->size() - kBlockSize;
_assignBlock(block);
Zone_assignBlock(this, block);
_blockCount = 1u;
}
}
@@ -50,14 +68,18 @@ void Zone::reset(ResetPolicy resetPolicy) noexcept {
Block* cur = _block;
// Can't be altered.
if (cur == &_zeroBlock)
if (cur == &_zeroBlock) {
return;
}
if (resetPolicy == ResetPolicy::kHard) {
bool hasStatic = hasStaticBlock();
Block* initial = const_cast<Zone::Block*>(&_zeroBlock);
_ptr = initial->data();
_end = initial->data();
_block = initial;
_blockCount = size_t(hasStatic);
// Since cur can be in the middle of the double-linked list, we have to traverse both directions (`prev` and
// `next`) separately to visit all.
@@ -65,12 +87,11 @@ void Zone::reset(ResetPolicy resetPolicy) noexcept {
do {
Block* prev = cur->prev;
// If this is the first block and this ZoneTmp is temporary then the first block is statically allocated.
// We cannot free it and it makes sense to keep it even when this is hard reset.
if (prev == nullptr && _isTemporary) {
if (prev == nullptr && hasStatic) {
// If this is the first block and this Zone is actually a ZoneTmp then the first block cannot be freed.
cur->prev = nullptr;
cur->next = nullptr;
_assignBlock(cur);
Zone_assignBlock(this, cur);
break;
}
@@ -86,9 +107,10 @@ void Zone::reset(ResetPolicy resetPolicy) noexcept {
}
}
else {
while (cur->prev)
while (cur->prev) {
cur = cur->prev;
_assignBlock(cur);
}
Zone_assignBlock(this, cur);
}
}
@@ -99,68 +121,91 @@ void* Zone::_alloc(size_t size, size_t alignment) noexcept {
Block* curBlock = _block;
Block* next = curBlock->next;
size_t rawBlockAlignment = blockAlignment();
size_t minimumAlignment = Support::max<size_t>(alignment, rawBlockAlignment);
size_t defaultBlockAlignment = blockAlignment();
size_t requiredBlockAlignment = Support::max<size_t>(alignment, defaultBlockAlignment);
// If the `Zone` has been cleared the current block doesn't have to be the last one. Check if there is a block
// that can be used instead of allocating a new one. If there is a `next` block it's completely unused, we don't
// have to check for remaining bytes in that case.
if (next) {
uint8_t* ptr = Support::alignUp(next->data(), minimumAlignment);
uint8_t* end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
uint8_t* ptr = Support::alignUp(next->data(), requiredBlockAlignment);
uint8_t* end = next->data() + next->size;
if (size <= (size_t)(end - ptr)) {
_block = next;
_ptr = ptr + size;
_end = Support::alignDown(next->data() + next->size, rawBlockAlignment);
_end = end;
return static_cast<void*>(ptr);
}
}
size_t blockAlignmentOverhead = alignment - Support::min<size_t>(alignment, Globals::kAllocAlignment);
size_t newSize = Support::max(blockSize(), size);
// Calculates the "default" size of a next block - in most cases this would be enough for the allocation. In
// general we want to gradually increase block size when more and more blocks are allocated until the maximum
// block size. Since we use shifts (aka log2(size) sizes) we just need block count and minumum/maximum block
// size shift to calculate the final size.
size_t defaultBlockSizeShift = Support::min<size_t>(_blockCount + _minimumBlockSizeShift, _maximumBlockSizeShift);
size_t defaultBlockSize = size_t(1) << defaultBlockSizeShift;
// Prevent arithmetic overflow.
if (ASMJIT_UNLIKELY(newSize > SIZE_MAX - kBlockSize - blockAlignmentOverhead))
// Allocate a new block. We have to accommodate all possible overheads so after the memory is allocated and then
// properly aligned there will be size for the requested memory. In 99.9999% cases this is never a problem, but
// we must be sure that even rare border cases would allocate properly.
size_t alignmentOverhead = requiredBlockAlignment - Support::min<size_t>(requiredBlockAlignment, Globals::kAllocAlignment);
size_t blockSizeOverhead = kBlockSize + Globals::kAllocOverhead + alignmentOverhead;
// If the requested size is larger than a default calculated block size -> increase block size so the allocation
// would be enough to fit the requested size.
size_t finalBlockSize = defaultBlockSize;
if (ASMJIT_UNLIKELY(size > defaultBlockSize - blockSizeOverhead)) {
if (ASMJIT_UNLIKELY(size > SIZE_MAX - blockSizeOverhead)) {
// This would probably never happen in practice - however, it needs to be done to stop malicious cases like
// `alloc(SIZE_MAX)`.
return nullptr;
}
finalBlockSize = size + alignmentOverhead + kBlockSize;
}
else {
finalBlockSize -= Globals::kAllocOverhead;
}
// Allocate new block.
Block* newBlock = static_cast<Block*>(::malloc(finalBlockSize));
if (ASMJIT_UNLIKELY(!newBlock)) {
return nullptr;
}
// Allocate new block - we add alignment overhead to `newSize`, which becomes the new block size, and we also add
// `kBlockOverhead` to the allocator as it includes members of `Zone::Block` structure.
newSize += blockAlignmentOverhead;
Block* newBlock = static_cast<Block*>(::malloc(newSize + kBlockSize));
if (ASMJIT_UNLIKELY(!newBlock))
return nullptr;
// finalBlockSize includes the struct size, which must be avoided when assigning the size to a newly allocated block.
size_t realBlockSize = finalBlockSize - kBlockSize;
// Align the pointer to `minimumAlignment` and adjust the size of this block accordingly. It's the same as using
// `minimumAlignment - Support::alignUpDiff()`, just written differently.
{
newBlock->prev = nullptr;
newBlock->next = nullptr;
newBlock->size = newSize;
newBlock->prev = nullptr;
newBlock->next = nullptr;
newBlock->size = realBlockSize;
if (curBlock != &_zeroBlock) {
newBlock->prev = curBlock;
curBlock->next = newBlock;
if (curBlock != &_zeroBlock) {
newBlock->prev = curBlock;
curBlock->next = newBlock;
// Does only happen if there is a next block, but the requested memory can't fit into it. In this case a new
// buffer is allocated and inserted between the current block and the next one.
if (next) {
newBlock->next = next;
next->prev = newBlock;
}
// Does only happen if there is a next block, but the requested memory can't fit into it. In this case a new
// buffer is allocated and inserted between the current block and the next one.
if (next) {
newBlock->next = next;
next->prev = newBlock;
}
uint8_t* ptr = Support::alignUp(newBlock->data(), minimumAlignment);
uint8_t* end = Support::alignDown(newBlock->data() + newSize, rawBlockAlignment);
_ptr = ptr + size;
_end = end;
_block = newBlock;
ASMJIT_ASSERT(_ptr <= _end);
return static_cast<void*>(ptr);
}
uint8_t* ptr = Support::alignUp(newBlock->data(), requiredBlockAlignment);
uint8_t* end = newBlock->data() + realBlockSize;
_ptr = ptr + size;
_end = end;
_block = newBlock;
_blockCount++;
ASMJIT_ASSERT(_ptr <= _end);
return static_cast<void*>(ptr);
}
void* Zone::allocZeroed(size_t size, size_t alignment) noexcept {
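Note: the adaptive sizing boils down to defaultBlockSizeShift = min(blockCount + minimumBlockSizeShift, maximumBlockSizeShift), i.e. each newly allocated block doubles the default size until the 32 MiB cap. A back-of-the-envelope sketch (a minimum shift of 16 is assumed; the cap comes from Zone::_init above):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t minimumBlockSizeShift = 16;  // assumed; derived from the Zone constructor argument
      size_t maximumBlockSizeShift = 25;  // 32 MiB cap set in Zone::_init
      for (size_t blockCount = 0; blockCount < 12; blockCount++) {
        size_t shift = std::min(blockCount + minimumBlockSizeShift, maximumBlockSizeShift);
        std::printf("block #%zu -> default size %zu KiB\n", blockCount, (size_t(1) << shift) / 1024u);
      }
      // 64 KiB, 128 KiB, 256 KiB, ... saturating at 32768 KiB (32 MiB) from the 10th block on.
    }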
@@ -286,13 +331,13 @@ void* ZoneAllocator::_alloc(size_t size, size_t& allocatedSize) noexcept {
}
else {
// Allocate a dynamic block.
size_t kBlockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
size_t blockOverhead = sizeof(DynamicBlock) + sizeof(DynamicBlock*) + kBlockAlignment;
// Handle a possible overflow.
if (ASMJIT_UNLIKELY(kBlockOverhead >= SIZE_MAX - size))
if (ASMJIT_UNLIKELY(blockOverhead >= SIZE_MAX - size))
return nullptr;
void* p = ::malloc(size + kBlockOverhead);
void* p = ::malloc(size + blockOverhead);
if (ASMJIT_UNLIKELY(!p)) {
allocatedSize = 0;
return nullptr;
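Note: the local kBlockOverhead is renamed to blockOverhead (it is a runtime value, not a constant); the guard around it is the usual pre-malloc overflow check, sketched generically here:

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Returns nullptr instead of letting size + overhead wrap around to a tiny allocation.
    void* allocWithOverhead(size_t size, size_t overhead) {
      if (overhead >= SIZE_MAX - size)  // rejects any sum that would reach or exceed SIZE_MAX
        return nullptr;
      return std::malloc(size + overhead);
    }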
@@ -42,13 +42,14 @@ public:
};
enum Limits : size_t {
kBlockSize = sizeof(Block),
kBlockOverhead = Globals::kAllocOverhead + kBlockSize,
kMinBlockSize = 256, // The number is ridiculously small, but still possible.
kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 1),
kMinBlockSize = 64, // The number is ridiculously small, but still possible.
kMaxBlockSize = size_t(1) << (sizeof(size_t) * 8 - 4 - 1),
kMinAlignment = 1,
kMaxAlignment = 64
kMaxAlignment = 64,
kBlockSize = sizeof(Block),
kBlockOverhead = kBlockSize + Globals::kAllocOverhead
};
//! Pointer in the current block.
@@ -58,17 +59,18 @@ public:
//! Current block.
Block* _block;
union {
struct {
//! Default block size.
size_t _blockSize : Support::bitSizeOf<size_t>() - 4;
//! First block is temporary (ZoneTmp).
size_t _isTemporary : 1;
//! Block alignment (1 << alignment).
size_t _blockAlignmentShift : 3;
};
size_t _packedData;
};
//! Block alignment shift
uint8_t _blockAlignmentShift;
//! Minimum log2(blockSize) to allocate.
uint8_t _minimumBlockSizeShift;
//! Maximum log2(blockSize) to allocate.
uint8_t _maximumBlockSizeShift;
//! True when the Zone has a static block (static blocks are used by ZoneTmp).
uint8_t _hasStaticBlock;
//! Reserved for future use, must be zero.
uint32_t _reserved;
//! Count of allocated blocks.
size_t _blockCount;
static ASMJIT_API const Block _zeroBlock;
@@ -85,33 +87,39 @@ public:
//!
//! It's not required, but it's good practice to set `blockSize` to a reasonable value that depends on the usage
//! of `Zone`. Greater block sizes are generally safer and perform better than unreasonably low block sizes.
ASMJIT_INLINE_NODEBUG explicit Zone(size_t blockSize, size_t blockAlignment = 1) noexcept {
_init(blockSize, blockAlignment, nullptr);
ASMJIT_INLINE_NODEBUG explicit Zone(size_t minimumBlockSize, size_t blockAlignment = 1) noexcept {
_init(minimumBlockSize, blockAlignment, nullptr);
}
//! Creates a new Zone with a first block pointing to a `temporary` memory.
ASMJIT_INLINE_NODEBUG Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary& temporary) noexcept {
_init(blockSize, blockAlignment, &temporary);
ASMJIT_INLINE_NODEBUG Zone(size_t minimumBlockSize, size_t blockAlignment, const Support::Temporary& temporary) noexcept {
_init(minimumBlockSize, blockAlignment, &temporary);
}
//! \overload
ASMJIT_INLINE_NODEBUG Zone(size_t blockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
_init(blockSize, blockAlignment, temporary);
ASMJIT_INLINE_NODEBUG Zone(size_t minimumBlockSize, size_t blockAlignment, const Support::Temporary* temporary) noexcept {
_init(minimumBlockSize, blockAlignment, temporary);
}
//! Moves an existing `Zone`.
//!
//! \note You cannot move an existing `ZoneTmp` as it uses embedded storage. Attempting to move `ZoneTmp` would
//! result in assertion failure in debug mode and undefined behavior in release mode.
//! cause an undefined behavior (covered by assertions in debug mode).
inline Zone(Zone&& other) noexcept
: _ptr(other._ptr),
_end(other._end),
_block(other._block),
_packedData(other._packedData) {
ASMJIT_ASSERT(!other.isTemporary());
_blockAlignmentShift(other._blockAlignmentShift),
_minimumBlockSizeShift(other._minimumBlockSizeShift),
_maximumBlockSizeShift(other._maximumBlockSizeShift),
_hasStaticBlock(other._hasStaticBlock),
_reserved(other._reserved),
_blockCount(other._blockCount) {
ASMJIT_ASSERT(!other.hasStaticBlock());
other._block = const_cast<Block*>(&_zeroBlock);
other._ptr = other._block->data();
other._end = other._block->data();
other._blockCount = 0u;
}
//! Destroys the `Zone` instance.
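Note: the move constructor now copies the explicit fields instead of _packedData and asserts on hasStaticBlock() rather than isTemporary(); the moved-from Zone is parked on the shared zero block with a zero block count. A usage sketch (include path assumed; only heap-backed zones may be moved):

    #include <asmjit/core.h>
    #include <utility>
    using namespace asmjit;

    int main() {
      Zone a(16u * 1024u);      // heap-backed Zone, so it can be moved
      void* p = a.alloc(128);
      Zone b(std::move(a));     // b takes over the block list; a is reset to the zero block
      // A ZoneTmp embeds its first block in static storage, so moving it is asserted against.
      (void)b;
      return p ? 0 : 1;
    }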
@@ -132,13 +140,15 @@ public:
//! \name Accessors
//! \{
//! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
ASMJIT_INLINE_NODEBUG bool isTemporary() const noexcept { return _isTemporary != 0; }
//! Returns the default block size.
ASMJIT_INLINE_NODEBUG size_t blockSize() const noexcept { return _blockSize; }
//! Returns the default block alignment.
ASMJIT_INLINE_NODEBUG size_t blockAlignment() const noexcept { return size_t(1) << _blockAlignmentShift; }
//! Returns a minimum block size.
ASMJIT_INLINE_NODEBUG size_t minimumBlockSize() const noexcept { return size_t(1) << _minimumBlockSizeShift; }
//! Returns a maximum block size.
ASMJIT_INLINE_NODEBUG size_t maximumBlockSize() const noexcept { return size_t(1) << _maximumBlockSizeShift; }
//! Tests whether this `Zone` is actually a `ZoneTmp` that uses temporary memory.
ASMJIT_INLINE_NODEBUG uint8_t hasStaticBlock() const noexcept { return _hasStaticBlock; }
//! Returns remaining size of the current block.
ASMJIT_INLINE_NODEBUG size_t remainingSize() const noexcept { return (size_t)(_end - _ptr); }
@@ -175,13 +185,19 @@ public:
inline void swap(Zone& other) noexcept {
// This could lead to a disaster.
ASMJIT_ASSERT(!this->isTemporary());
ASMJIT_ASSERT(!other.isTemporary());
ASMJIT_ASSERT(!this->hasStaticBlock());
ASMJIT_ASSERT(!other.hasStaticBlock());
std::swap(_ptr, other._ptr);
std::swap(_end, other._end);
std::swap(_block, other._block);
std::swap(_packedData, other._packedData);
std::swap(_blockAlignmentShift, other._blockAlignmentShift);
std::swap(_minimumBlockSizeShift, other._minimumBlockSizeShift);
std::swap(_maximumBlockSizeShift, other._maximumBlockSizeShift);
std::swap(_hasStaticBlock, other._hasStaticBlock);
std::swap(_reserved, other._reserved);
std::swap(_blockCount, other._blockCount);
}
//! Aligns the current pointer to `alignment`.
@@ -194,26 +210,12 @@ public:
//! \note This function doesn't respect any alignment. If you need to ensure there is enough room for an aligned
//! allocation you need to call `align()` before calling `ensure()`.
ASMJIT_INLINE_NODEBUG Error ensure(size_t size) noexcept {
if (size <= remainingSize())
if (ASMJIT_LIKELY(size <= remainingSize()))
return kErrorOk;
else
return _alloc(0, 1) ? kErrorOk : DebugUtils::errored(kErrorOutOfMemory);
}
inline void _assignBlock(Block* block) noexcept {
size_t alignment = blockAlignment();
_ptr = Support::alignUp(block->data(), alignment);
_end = Support::alignDown(block->data() + block->size, alignment);
_block = block;
}
inline void _assignZeroBlock() noexcept {
Block* block = const_cast<Block*>(&_zeroBlock);
_ptr = block->data();
_end = block->data();
_block = block;
}
//! \}
//! \name Allocation
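Note: per the \note above, ensure() only checks raw remaining bytes. A short sketch of the documented align-then-ensure pattern (ensure() as shown above; align() per its doc comment, exact signature assumed):

    #include <asmjit/core.h>
    using namespace asmjit;

    // Reserve `size` bytes measured from an already-aligned pointer.
    Error reserveAligned(Zone& zone, size_t size, size_t alignment) {
      zone.align(alignment);     // move the current pointer up to `alignment` first
      return zone.ensure(size);  // then make sure `size` bytes fit in the current block
    }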
@@ -229,8 +231,8 @@ public:
//!
//! class Object { ... };
//!
//! // Create Zone with default block size of approximately 65536 bytes.
//! Zone zone(65536 - Zone::kBlockOverhead);
//! // Create Zone with default block size of 65536 bytes (the maximum size per alloc() would be slightly less).
//! Zone zone(65536);
//!
//! // Create your objects using zone object allocating, for example:
//! Object* obj = static_cast<Object*>( zone.alloc(sizeof(Object)) );
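Note: a slightly fuller version of the documented usage above (a sketch; the placement-new step is implied by the doc comment rather than shown in this hunk):

    #include <asmjit/core.h>
    #include <new>
    using namespace asmjit;

    class Object { public: int value = 0; };

    void example() {
      Zone zone(65536);                        // 64 KiB default block size, as in the doc comment
      void* mem = zone.alloc(sizeof(Object));  // raw storage; there is no per-object free
      if (!mem) return;                        // allocation can fail if the system is out of memory
      Object* obj = new (mem) Object();        // construct in place
      obj->value = 42;
      // Destroying (or resetting) the Zone releases every block, and thus every Object, at once.
    }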
@@ -353,6 +355,14 @@ public:
ASMJIT_API char* sformat(const char* str, ...) noexcept;
//! \}
#if !defined(ASMJIT_NO_DEPRECATED)
ASMJIT_DEPRECATED("Use Zone::minimumBlockSize() instead of Zone::blockSize()")
ASMJIT_INLINE_NODEBUG size_t blockSize() const noexcept { return minimumBlockSize(); }
ASMJIT_DEPRECATED("Use Zone::hasStaticBlock() instead of Zone::isTemporary()")
ASMJIT_INLINE_NODEBUG bool isTemporary() const noexcept { return hasStaticBlock() != 0u; }
#endif
};
//! \ref Zone with `N` bytes of a static storage, used for the initial block.
@@ -175,7 +175,7 @@ static void test_zone_stack(ZoneAllocator* allocator, const char* typeName) {
}
UNIT(zone_stack) {
Zone zone(8096 - Zone::kBlockOverhead);
Zone zone(8096);
ZoneAllocator allocator(&zone);
test_zone_stack<int>(&allocator, "int");
@@ -364,7 +364,7 @@ static void test_zone_vector(ZoneAllocator* allocator, const char* typeName) {
}
static void test_zone_bitvector(ZoneAllocator* allocator) {
Zone zone(8096 - Zone::kBlockOverhead);
Zone zone(8096);
uint32_t i, count;
uint32_t kMaxCount = 100;
@@ -411,7 +411,7 @@ static void test_zone_bitvector(ZoneAllocator* allocator) {
}
UNIT(zone_vector) {
Zone zone(8096 - Zone::kBlockOverhead);
Zone zone(8096);
ZoneAllocator allocator(&zone);
test_zone_vector<int>(&allocator, "int");