[Bug] Virtual memory changes - addressing remaining issues on NetBSD

Changes related to VirtMem
  * Moved memory mapping code to mapMemory() and unmapMemory() helper functions
  * Added MemoryFlags::kMapShared to optionally allow MAP_SHARED when necessary
  * Clarified that MAP_SHARED is used by dual mapping by default
  * Extended MemoryFlags::kMMapMax... to use PROT_MPROTECT on NetBSD
  * Changed NetBSD dual mapping code to use MAP_SHARED to avoid COW

Changes related to JitAllocator
  * Propagated errors from VirtMem (not just OOM) when VirtMem allocation failed
  * Added more tests to verify that RW + RX regions share the same content
This commit is contained in:
kobalicek
2023-03-02 13:53:55 +01:00
parent d4dfd8e865
commit 915186f6c5
4 changed files with 211 additions and 102 deletions

View File

@@ -506,7 +506,7 @@ ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t
// //
// NOTE: The block doesn't have `kFlagEmpty` flag set, because the new block // NOTE: The block doesn't have `kFlagEmpty` flag set, because the new block
// is only allocated when it's actually needed, so it would be cleared anyway. // is only allocated when it's actually needed, so it would be cleared anyway.
static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t blockSize) noexcept { static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock** dst, JitAllocatorPool* pool, size_t blockSize) noexcept {
using Support::BitWord; using Support::BitWord;
using Support::kBitWordSizeInBits; using Support::kBitWordSizeInBits;
@@ -541,7 +541,10 @@ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* imp
if (block) if (block)
::free(block); ::free(block);
return nullptr; if (err)
return err;
else
return kErrorOutOfMemory;
} }
// Fill the memory if the secure mode is enabled. // Fill the memory if the secure mode is enabled.
@@ -551,7 +554,8 @@ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* imp
} }
memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord)); memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord));
return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize); *dst = new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
return kErrorOk;
} }
static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept { static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
@@ -798,12 +802,9 @@ Error JitAllocator::alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcep
if (ASMJIT_UNLIKELY(!blockSize)) if (ASMJIT_UNLIKELY(!blockSize))
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(kErrorOutOfMemory);
block = JitAllocatorImpl_newBlock(impl, pool, blockSize); ASMJIT_PROPAGATE(JitAllocatorImpl_newBlock(impl, &block, pool, blockSize));
areaIndex = 0; areaIndex = 0;
if (ASMJIT_UNLIKELY(!block))
return DebugUtils::errored(kErrorOutOfMemory);
JitAllocatorImpl_insertBlock(impl, block); JitAllocatorImpl_insertBlock(impl, block);
block->_searchStart = areaSize; block->_searchStart = areaSize;
block->_largestUnusedArea = block->areaSize() - areaSize; block->_largestUnusedArea = block->areaSize() - areaSize;
@@ -1018,6 +1019,34 @@ public:
uint64_t _state[2]; uint64_t _state[2];
}; };
namespace JitAllocatorUtils {
static void fillPattern64(void* p_, uint64_t pattern, size_t sizeInBytes) noexcept {
uint64_t* p = static_cast<uint64_t*>(p_);
size_t n = sizeInBytes / 8u;
for (size_t i = 0; i < n; i++)
p[i] = pattern;
}
static bool verifyPattern64(const void* p_, uint64_t pattern, size_t sizeInBytes) noexcept {
const uint64_t* p = static_cast<const uint64_t*>(p_);
size_t n = sizeInBytes / 8u;
for (size_t i = 0; i < n; i++) {
if (p[i] != pattern) {
INFO("Pattern verification failed at 0x%p [%zu * 8]: value(0x%016llX) != expected(0x%016llX)",
p,
i,
(unsigned long long)p[i],
(unsigned long long)pattern);
return false;
}
}
return true;
}
}
// Helper class to verify that JitAllocator doesn't return addresses that overlap. // Helper class to verify that JitAllocator doesn't return addresses that overlap.
class JitAllocatorWrapper { class JitAllocatorWrapper {
public: public:
@@ -1035,9 +1064,19 @@ public:
class Record : public ZoneTreeNodeT<Record>, class Record : public ZoneTreeNodeT<Record>,
public Range { public Range {
public: public:
inline Record(uint8_t* addr, size_t size) //! Read/write address, in case this is a dual mapping.
void* _rw;
//! Describes a pattern used to fill the allocated memory.
uint64_t pattern;
inline Record(void* rx, void* rw, size_t size, uint64_t pattern)
: ZoneTreeNodeT<Record>(), : ZoneTreeNodeT<Record>(),
Range(addr, size) {} Range(static_cast<uint8_t*>(rx), size),
_rw(rw),
pattern(pattern) {}
inline void* rx() const noexcept { return addr; }
inline void* rw() const noexcept { return _rw; }
inline bool operator<(const Record& other) const noexcept { return addr < other.addr; } inline bool operator<(const Record& other) const noexcept { return addr < other.addr; }
inline bool operator>(const Record& other) const noexcept { return addr > other.addr; } inline bool operator>(const Record& other) const noexcept { return addr > other.addr; }
@@ -1050,14 +1089,16 @@ public:
ZoneAllocator _heap; ZoneAllocator _heap;
ZoneTree<Record> _records; ZoneTree<Record> _records;
JitAllocator _allocator; JitAllocator _allocator;
Random _rng;
explicit JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept explicit JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept
: _zone(1024 * 1024), : _zone(1024 * 1024),
_heap(&_zone), _heap(&_zone),
_allocator(params) {} _allocator(params),
_rng(0x123456789u) {}
void _insert(void* p_, size_t size) noexcept { void _insert(void* pRX, void* pRW, size_t size) noexcept {
uint8_t* p = static_cast<uint8_t*>(p_); uint8_t* p = static_cast<uint8_t*>(pRX);
uint8_t* pEnd = p + size - 1; uint8_t* pEnd = p + size - 1;
Record* record; Record* record;
@@ -1070,9 +1111,18 @@ public:
if (record) if (record)
EXPECT(record == nullptr, "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size); EXPECT(record == nullptr, "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);
record = _heap.newT<Record>(p, size); uint64_t pattern = _rng.nextUInt64();
record = _heap.newT<Record>(pRX, pRW, size, pattern);
EXPECT(record != nullptr, "Out of memory, cannot allocate 'Record'"); EXPECT(record != nullptr, "Out of memory, cannot allocate 'Record'");
{
VirtMem::ProtectJitReadWriteScope scope(pRW, size);
JitAllocatorUtils::fillPattern64(pRW, pattern, size);
}
VirtMem::flushInstructionCache(pRX, size);
EXPECT(JitAllocatorUtils::verifyPattern64(pRX, pattern, size) == true);
_records.insert(record); _records.insert(record);
} }
@@ -1080,6 +1130,9 @@ public:
Record* record = _records.get(static_cast<uint8_t*>(p)); Record* record = _records.get(static_cast<uint8_t*>(p));
EXPECT(record != nullptr, "Address [%p] doesn't exist\n", p); EXPECT(record != nullptr, "Address [%p] doesn't exist\n", p);
EXPECT(JitAllocatorUtils::verifyPattern64(record->rx(), record->pattern, record->size) == true);
EXPECT(JitAllocatorUtils::verifyPattern64(record->rw(), record->pattern, record->size) == true);
_records.remove(record); _records.remove(record);
_heap.release(record, sizeof(Record)); _heap.release(record, sizeof(Record));
} }
@@ -1091,7 +1144,7 @@ public:
Error err = _allocator.alloc(&rxPtr, &rwPtr, size); Error err = _allocator.alloc(&rxPtr, &rwPtr, size);
EXPECT(err == kErrorOk, "JitAllocator failed to allocate %zu bytes\n", size); EXPECT(err == kErrorOk, "JitAllocator failed to allocate %zu bytes\n", size);
_insert(rxPtr, size); _insert(rxPtr, rwPtr, size);
return rxPtr; return rxPtr;
} }
@@ -1156,7 +1209,7 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep
} }
static void test_jit_allocator_alloc_release() noexcept { static void test_jit_allocator_alloc_release() noexcept {
size_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 100000; size_t kCount = BrokenAPI::hasArg("--quick") ? 20000 : 100000;
struct TestParams { struct TestParams {
const char* name; const char* name;
@@ -1316,8 +1369,8 @@ UNIT(jit_allocator) {
test_jit_allocator_alloc_release(); test_jit_allocator_alloc_release();
test_jit_allocator_query(); test_jit_allocator_query();
} }
#endif #endif // ASMJIT_TEST
ASMJIT_END_NAMESPACE ASMJIT_END_NAMESPACE
#endif #endif // !ASMJIT_NO_JIT

View File

@@ -76,7 +76,7 @@ uint32_t OSUtils::getTickCount() noexcept {
uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u); uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u);
return uint32_t(t & 0xFFFFFFFFu); return uint32_t(t & 0xFFFFFFFFu);
#else #else
#pragma message("asmjit::OSUtils::getTickCount() doesn't have implementation for the target OS.") #pragma message("[asmjit] OSUtils::getTickCount() doesn't have implementation for the target OS.")
return 0; return 0;
#endif #endif
} }

View File

@@ -43,7 +43,7 @@
#define MAP_ANONYMOUS MAP_ANON #define MAP_ANONYMOUS MAP_ANON
#endif #endif
#define ASMJIT_DUAL_MAPPING_ANON_FD #define ASMJIT_ANONYMOUS_MEMORY_USE_FD
#if defined(__APPLE__) || defined(__BIONIC__) #if defined(__APPLE__) || defined(__BIONIC__)
#define ASMJIT_VM_SHM_DETECT 0 #define ASMJIT_VM_SHM_DETECT 0
@@ -61,8 +61,8 @@
#endif #endif
#if defined(__NetBSD__) && defined(MAP_REMAPDUP) && defined(PROT_MPROTECT) #if defined(__NetBSD__) && defined(MAP_REMAPDUP) && defined(PROT_MPROTECT)
#undef ASMJIT_DUAL_MAPPING_ANON_FD #undef ASMJIT_ANONYMOUS_MEMORY_USE_FD
#define ASMJIT_DUAL_MAPPING_REMAPDUP #define ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP
#endif #endif
#endif #endif
@@ -73,6 +73,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(VirtMem)
// Virtual Memory Utilities // Virtual Memory Utilities
// ======================== // ========================
ASMJIT_MAYBE_UNUSED
static const constexpr MemoryFlags dualMappingFilter[2] = { static const constexpr MemoryFlags dualMappingFilter[2] = {
MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite, MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite,
MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute
@@ -286,13 +287,13 @@ static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
// Virtual Memory [Posix] - Anonymous Memory // Virtual Memory [Posix] - Anonymous Memory
// ======================================== // ========================================
#if defined(ASMJIT_DUAL_MAPPING_ANON_FD) #if defined(ASMJIT_ANONYMOUS_MEMORY_USE_FD)
// Some operating systems don't allow /dev/shm to be executable. On Linux this happens when /dev/shm is mounted with // Some operating systems don't allow /dev/shm to be executable. On Linux this happens when /dev/shm is mounted with
// 'noexec', which is enforced by systemd. Other operating systems like MacOS also restrict executable permissions // 'noexec', which is enforced by systemd. Other operating systems like MacOS also restrict executable permissions
// regarding /dev/shm, so we use a runtime detection before attempting to allocate executable memory. Sometimes we // regarding /dev/shm, so we use a runtime detection before attempting to allocate executable memory. Sometimes we
// don't need the detection as we know it would always result in `AnonymousMemStrategy::kTmpDir`. // don't need the detection as we know it would always result in `AnonymousMemoryStrategy::kTmpDir`.
enum class AnonymousMemStrategy : uint32_t { enum class AnonymousMemoryStrategy : uint32_t {
kUnknown = 0, kUnknown = 0,
kDevShm = 1, kDevShm = 1,
kTmpDir = 2 kTmpDir = 2
@@ -333,9 +334,13 @@ public:
#if defined(__linux__) && defined(__NR_memfd_create) #if defined(__linux__) && defined(__NR_memfd_create)
// Linux specific 'memfd_create' - if the syscall returns `ENOSYS` it means // Linux specific 'memfd_create' - if the syscall returns `ENOSYS` it means
// it's not available and we will never call it again (would be pointless). // it's not available and we will never call it again (would be pointless).
//
// NOTE: There is also memfd_create() libc function in FreeBSD, but it internally
// uses `shm_open(SHM_ANON, ...)` so it's not needed to add support for it (it's
// not a syscall as in Linux).
// Zero initialized, if ever changed to '1' that would mean the syscall is not // Zero initialized, if ever changed to '1' that would mean the syscall is not
// available and we must use `shm_open()` and `shm_unlink()`. // available and we must use `shm_open()` and `shm_unlink()` (or regular `open()`).
static volatile uint32_t memfd_create_not_supported; static volatile uint32_t memfd_create_not_supported;
if (!memfd_create_not_supported) { if (!memfd_create_not_supported) {
@@ -440,7 +445,7 @@ public:
}; };
#if ASMJIT_VM_SHM_DETECT #if ASMJIT_VM_SHM_DETECT
static Error detectAnonMemStrategy(AnonymousMemStrategy* strategyOut) noexcept { static Error detectAnonymousMemoryStrategy(AnonymousMemoryStrategy* strategyOut) noexcept {
AnonymousMemory anonMem; AnonymousMemory anonMem;
Info vmInfo = info(); Info vmInfo = info();
@@ -451,39 +456,39 @@ static Error detectAnonMemStrategy(AnonymousMemStrategy* strategyOut) noexcept {
if (ptr == MAP_FAILED) { if (ptr == MAP_FAILED) {
int e = errno; int e = errno;
if (e == EINVAL) { if (e == EINVAL) {
*strategyOut = AnonymousMemStrategy::kTmpDir; *strategyOut = AnonymousMemoryStrategy::kTmpDir;
return kErrorOk; return kErrorOk;
} }
return DebugUtils::errored(asmjitErrorFromErrno(e)); return DebugUtils::errored(asmjitErrorFromErrno(e));
} }
else { else {
munmap(ptr, vmInfo.pageSize); munmap(ptr, vmInfo.pageSize);
*strategyOut = AnonymousMemStrategy::kDevShm; *strategyOut = AnonymousMemoryStrategy::kDevShm;
return kErrorOk; return kErrorOk;
} }
} }
#endif #endif
static Error getAnonMemStrategy(AnonymousMemStrategy* strategyOut) noexcept { static Error getAnonymousMemoryStrategy(AnonymousMemoryStrategy* strategyOut) noexcept {
#if ASMJIT_VM_SHM_DETECT #if ASMJIT_VM_SHM_DETECT
// Initially don't assume anything. It has to be tested whether '/dev/shm' was mounted with 'noexec' flag or not. // Initially don't assume anything. It has to be tested whether '/dev/shm' was mounted with 'noexec' flag or not.
static std::atomic<uint32_t> globalShmStrategy; static std::atomic<uint32_t> globalStrategy;
AnonymousMemStrategy strategy = static_cast<AnonymousMemStrategy>(globalShmStrategy.load()); AnonymousMemoryStrategy strategy = static_cast<AnonymousMemoryStrategy>(globalStrategy.load());
if (strategy == AnonymousMemStrategy::kUnknown) { if (strategy == AnonymousMemoryStrategy::kUnknown) {
ASMJIT_PROPAGATE(detectAnonMemStrategy(&strategy)); ASMJIT_PROPAGATE(detectAnonymousMemoryStrategy(&strategy));
globalShmStrategy.store(static_cast<uint32_t>(strategy)); globalStrategy.store(static_cast<uint32_t>(strategy));
} }
*strategyOut = strategy; *strategyOut = strategy;
return kErrorOk; return kErrorOk;
#else #else
*strategyOut = AnonymousMemStrategy::kTmpDir; *strategyOut = AnonymousMemoryStrategy::kTmpDir;
return kErrorOk; return kErrorOk;
#endif #endif
} }
#endif // ASMJIT_DUAL_MAPPING_ANON_FD #endif // ASMJIT_ANONYMOUS_MEMORY_USE_FD
// Virtual Memory [Posix] - Hardened Runtime & MAP_JIT // Virtual Memory [Posix] - Hardened Runtime & MAP_JIT
// =================================================== // ===================================================
@@ -554,19 +559,37 @@ static inline int mmMapJitFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
#endif #endif
} }
// Returns BSD-specific `PROT_MAX()` flags. ASMJIT_MAYBE_UNUSED
static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept { static MemoryFlags maxAccessFlagsToRegularAccessFlags(MemoryFlags memoryFlags) noexcept {
#if defined(PROT_MAX)
static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value; static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kMMapMaxAccessRWX) >> kMaxProtShift);
}
if (Support::test(memoryFlags, MemoryFlags::kMMapMaxAccessReadWrite | MemoryFlags::kMMapMaxAccessExecute)) ASMJIT_MAYBE_UNUSED
return PROT_MAX(mmProtFromMemoryFlags((MemoryFlags)(uint32_t(memoryFlags) >> kMaxProtShift))); static MemoryFlags regularAccessFlagsToMaxAccessFlags(MemoryFlags memoryFlags) noexcept {
else static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
return 0; return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kAccessRWX) << kMaxProtShift);
}
// Returns maximum protection flags from `memoryFlags`.
//
// Uses:
// - `PROT_MPROTECT()` on NetBSD.
// - `PROT_MAX()` when available (BSD).
ASMJIT_MAYBE_UNUSED
static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
MemoryFlags acc = maxAccessFlagsToRegularAccessFlags(memoryFlags);
if (acc != MemoryFlags::kNone) {
#if defined(__NetBSD__) && defined(PROT_MPROTECT)
return PROT_MPROTECT(mmProtFromMemoryFlags(acc));
#elif defined(PROT_MAX)
return PROT_MAX(mmProtFromMemoryFlags(acc));
#else #else
DebugUtils::unused(memoryFlags); return 0;
return 0;
#endif #endif
}
return 0;
} }
static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept { static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
@@ -581,75 +604,89 @@ static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
return flags; return flags;
} }
Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept { static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = -1, off_t offset = 0) noexcept {
*p = nullptr; *p = nullptr;
if (size == 0) if (size == 0)
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(kErrorInvalidArgument);
int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags); int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags);
int mmFlags = MAP_PRIVATE | MAP_ANONYMOUS | mmMapJitFromMemoryFlags(memoryFlags); int mmFlags = mmMapJitFromMemoryFlags(memoryFlags);
void* ptr = mmap(nullptr, size, protection, mmFlags, -1, 0); mmFlags |= Support::test(memoryFlags, MemoryFlags::kMapShared) ? MAP_SHARED : MAP_PRIVATE;
if (fd == -1)
mmFlags |= MAP_ANONYMOUS;
void* ptr = mmap(nullptr, size, protection, mmFlags, fd, offset);
if (ptr == MAP_FAILED) if (ptr == MAP_FAILED)
return DebugUtils::errored(kErrorOutOfMemory); return DebugUtils::errored(asmjitErrorFromErrno(errno));
*p = ptr; *p = ptr;
return kErrorOk; return kErrorOk;
} }
Error release(void* p, size_t size) noexcept { static Error unmapMemory(void* p, size_t size) noexcept {
if (ASMJIT_UNLIKELY(munmap(p, size) != 0)) if (ASMJIT_UNLIKELY(munmap(p, size) != 0))
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(asmjitErrorFromErrno(errno));
return kErrorOk; return kErrorOk;
} }
Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
return mapMemory(p, size, memoryFlags);
}
Error release(void* p, size_t size) noexcept {
return unmapMemory(p, size);
}
Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept { Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
int protection = mmProtFromMemoryFlags(memoryFlags); int protection = mmProtFromMemoryFlags(memoryFlags);
if (mprotect(p, size, protection) == 0) if (mprotect(p, size, protection) == 0)
return kErrorOk; return kErrorOk;
return DebugUtils::errored(kErrorInvalidArgument); return DebugUtils::errored(asmjitErrorFromErrno(errno));
} }
// Virtual Memory [Posix] - Dual Mapping // Virtual Memory [Posix] - Dual Mapping
// ===================================== // =====================================
#if defined(ASMJIT_DUAL_MAPPING_REMAPDUP) static Error unmapDualMapping(DualMapping* dm, size_t size) noexcept {
static void unmapDualMapping(DualMapping* dm, size_t size) noexcept { Error err1 = unmapMemory(dm->rx, size);
if (dm->rw) Error err2 = kErrorOk;
munmap(dm->rw, size);
if (dm->rx) if (dm->rx != dm->rw)
munmap(dm->rx, size); err2 = unmapMemory(dm->rw, size);
// We can report only one error, so report the first...
if (err1 || err2)
return DebugUtils::errored(err1 ? err1 : err2);
dm->rx = nullptr;
dm->rw = nullptr;
return kErrorOk;
} }
#if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP)
static Error allocDualMappingUsingRemapdup(DualMapping* dmOut, size_t size, MemoryFlags memoryFlags) noexcept { static Error allocDualMappingUsingRemapdup(DualMapping* dmOut, size_t size, MemoryFlags memoryFlags) noexcept {
MemoryFlags maxAccessFlags = regularAccessFlagsToMaxAccessFlags(memoryFlags);
MemoryFlags finalFlags = memoryFlags | maxAccessFlags | MemoryFlags::kMapShared;
MemoryFlags rxFlags = finalFlags & ~(MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite);
MemoryFlags rwFlags = finalFlags & ~(MemoryFlags::kAccessExecute);
// Allocate RW mapping.
DualMapping dm {}; DualMapping dm {};
ASMJIT_PROPAGATE(mapMemory(&dm.rw, size, rwFlags));
dm.rw = mmap(NULL, size, PROT_MPROTECT(mmProtFromMemoryFlags(memoryFlags)), MAP_ANONYMOUS, -1, 0); // Allocate RX mapping.
if (dm.rw == MAP_FAILED) { dm.rx = mremap(dm.rw, size, nullptr, size, MAP_REMAPDUP);
return DebugUtils::errored(asmjitErrorFromErrno(errno));
}
dm.rx = mremap(dm.rw, size, NULL, size, MAP_REMAPDUP);
if (dm.rx == MAP_FAILED) { if (dm.rx == MAP_FAILED) {
int e = errno; int e = errno;
unmapDualMapping(&dm, size); munmap(dm.rw, size);
return DebugUtils::errored(asmjitErrorFromErrno(e)); return DebugUtils::errored(asmjitErrorFromErrno(e));
} }
MemoryFlags rxAccessFlags = memoryFlags & ~dualMappingFilter[0]; if (mprotect(dm.rx, size, mmProtFromMemoryFlags(rxFlags)) != 0) {
MemoryFlags rwAccessFlags = memoryFlags & ~dualMappingFilter[1];
if (mprotect(dm.rw, size, mmProtFromMemoryFlags(rwAccessFlags)) != 0) {
int e = errno;
unmapDualMapping(&dm, size);
return DebugUtils::errored(asmjitErrorFromErrno(e));
}
if (mprotect(dm.rx, size, mmProtFromMemoryFlags(rxAccessFlags)) != 0) {
int e = errno; int e = errno;
unmapDualMapping(&dm, size); unmapDualMapping(&dm, size);
return DebugUtils::errored(asmjitErrorFromErrno(e)); return DebugUtils::errored(asmjitErrorFromErrno(e));
@@ -667,14 +704,14 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
if (off_t(size) <= 0) if (off_t(size) <= 0)
return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge); return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);
#if defined(ASMJIT_DUAL_MAPPING_REMAPDUP) #if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP)
return allocDualMappingUsingRemapdup(dm, size, memoryFlags); return allocDualMappingUsingRemapdup(dm, size, memoryFlags);
#elif defined(ASMJIT_DUAL_MAPPING_ANON_FD) #elif defined(ASMJIT_ANONYMOUS_MEMORY_USE_FD)
bool preferTmpOverDevShm = Support::test(memoryFlags, MemoryFlags::kMappingPreferTmp); bool preferTmpOverDevShm = Support::test(memoryFlags, MemoryFlags::kMappingPreferTmp);
if (!preferTmpOverDevShm) { if (!preferTmpOverDevShm) {
AnonymousMemStrategy strategy; AnonymousMemoryStrategy strategy;
ASMJIT_PROPAGATE(getAnonMemStrategy(&strategy)); ASMJIT_PROPAGATE(getAnonymousMemoryStrategy(&strategy));
preferTmpOverDevShm = (strategy == AnonymousMemStrategy::kTmpDir); preferTmpOverDevShm = (strategy == AnonymousMemoryStrategy::kTmpDir);
} }
AnonymousMemory anonMem; AnonymousMemory anonMem;
@@ -683,16 +720,12 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
void* ptr[2]; void* ptr[2];
for (uint32_t i = 0; i < 2; i++) { for (uint32_t i = 0; i < 2; i++) {
MemoryFlags accessFlags = memoryFlags & ~dualMappingFilter[i]; MemoryFlags restrictedMemoryFlags = memoryFlags & ~dualMappingFilter[i];
int protection = mmProtFromMemoryFlags(accessFlags) | mmMaxProtFromMemoryFlags(accessFlags); Error err = mapMemory(&ptr[i], size, restrictedMemoryFlags | MemoryFlags::kMapShared, anonMem.fd(), 0);
if (err != kErrorOk) {
ptr[i] = mmap(nullptr, size, protection, MAP_SHARED, anonMem.fd(), 0);
if (ptr[i] == MAP_FAILED) {
// Get the error now before `munmap()` has a chance to clobber it.
int e = errno;
if (i == 1) if (i == 1)
munmap(ptr[0], size); unmapMemory(ptr[0], size);
return DebugUtils::errored(asmjitErrorFromErrno(e)); return err;
} }
} }
@@ -705,16 +738,7 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
} }
Error releaseDualMapping(DualMapping* dm, size_t size) noexcept { Error releaseDualMapping(DualMapping* dm, size_t size) noexcept {
Error err = release(dm->rx, size); return unmapDualMapping(dm, size);
if (dm->rx != dm->rw)
err |= release(dm->rw, size);
if (err)
return DebugUtils::errored(kErrorInvalidArgument);
dm->rx = nullptr;
dm->rw = nullptr;
return kErrorOk;
} }
#endif #endif
@@ -735,7 +759,7 @@ void flushInstructionCache(void* p, size_t size) noexcept {
char* end = start + size; char* end = start + size;
__builtin___clear_cache(start, end); __builtin___clear_cache(start, end);
#else #else
#pragma message("asmjit::VirtMem::flushInstructionCache() doesn't have implementation for the target OS and compiler") #pragma message("[asmjit] VirtMem::flushInstructionCache() doesn't have implementation for the target OS and compiler")
DebugUtils::unused(p, size); DebugUtils::unused(p, size);
#endif #endif
} }
@@ -770,7 +794,7 @@ HardenedRuntimeInfo hardenedRuntimeInfo() noexcept {
void protectJitMemory(ProtectJitAccess access) noexcept { void protectJitMemory(ProtectJitAccess access) noexcept {
#if defined(ASMJIT_HAS_PTHREAD_JIT_WRITE_PROTECT_NP) #if defined(ASMJIT_HAS_PTHREAD_JIT_WRITE_PROTECT_NP)
pthread_jit_write_protect_np(static_cast<uint32_t>(access)); pthread_jit_write_protect_np(static_cast<int>(access));
#else #else
DebugUtils::unused(access); DebugUtils::unused(access);
#endif #endif
@@ -778,4 +802,29 @@ void protectJitMemory(ProtectJitAccess access) noexcept {
ASMJIT_END_SUB_NAMESPACE ASMJIT_END_SUB_NAMESPACE
#endif // JitAllocator - Tests
// ====================
#if defined(ASMJIT_TEST)
ASMJIT_BEGIN_NAMESPACE
UNIT(virt_mem) {
VirtMem::Info vmInfo = VirtMem::info();
INFO("VirtMem::info():");
INFO(" pageSize: %zu", size_t(vmInfo.pageSize));
INFO(" pageGranularity: %zu", size_t(vmInfo.pageGranularity));
VirtMem::HardenedRuntimeInfo hardenedRtInfo = VirtMem::hardenedRuntimeInfo();
VirtMem::HardenedRuntimeFlags hardenedFlags = hardenedRtInfo.flags;
INFO("VirtMem::hardenedRuntimeInfo():");
INFO(" flags:");
INFO(" kEnabled: %s", Support::test(hardenedFlags, VirtMem::HardenedRuntimeFlags::kEnabled) ? "true" : "false");
INFO(" kMapJit: %s", Support::test(hardenedFlags, VirtMem::HardenedRuntimeFlags::kMapJit) ? "true" : "false");
}
ASMJIT_END_NAMESPACE
#endif // ASMJIT_TEST
#endif // !ASMJIT_NO_JIT

View File

@@ -96,6 +96,13 @@ enum class MemoryFlags : uint32_t {
//! MemoryFlags::kMMapMaxAccessExecute. //! MemoryFlags::kMMapMaxAccessExecute.
kMMapMaxAccessRWX = kMMapMaxAccessRead | kMMapMaxAccessWrite | kMMapMaxAccessExecute, kMMapMaxAccessRWX = kMMapMaxAccessRead | kMMapMaxAccessWrite | kMMapMaxAccessExecute,
//! Use `MAP_SHARED` when calling mmap().
//!
//! \note In some cases `MAP_SHARED` may be set automatically. For example when using dual mapping it's important
//! to use `MAP_SHARED` instead of `MAP_PRIVATE` to ensure that the OS would not copy pages on write (that would mean
//! updating only the RW mapped region and not RX mapped one).
kMapShared = 0x00000100u,
//! Not an access flag, only used by `allocDualMapping()` to override the default allocation strategy to always use //! Not an access flag, only used by `allocDualMapping()` to override the default allocation strategy to always use
//! a 'tmp' directory instead of "/dev/shm" (on POSIX platforms). Please note that this flag will be ignored if the //! a 'tmp' directory instead of "/dev/shm" (on POSIX platforms). Please note that this flag will be ignored if the
//! operating system allows to allocate an executable memory by a different API than `open()` or `shm_open()`. For //! operating system allows to allocate an executable memory by a different API than `open()` or `shm_open()`. For