diff --git a/src/asmjit/core/jitallocator.cpp b/src/asmjit/core/jitallocator.cpp
index 313736c..2a4305f 100644
--- a/src/asmjit/core/jitallocator.cpp
+++ b/src/asmjit/core/jitallocator.cpp
@@ -506,7 +506,7 @@ ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t
 //
 // NOTE: The block doesn't have `kFlagEmpty` flag set, because the new block
 // is only allocated when it's actually needed, so it would be cleared anyway.
-static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t blockSize) noexcept {
+static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock** dst, JitAllocatorPool* pool, size_t blockSize) noexcept {
   using Support::BitWord;
   using Support::kBitWordSizeInBits;
 
@@ -541,7 +541,10 @@ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* imp
     if (block)
       ::free(block);
 
-    return nullptr;
+    if (err)
+      return err;
+    else
+      return kErrorOutOfMemory;
   }
 
   // Fill the memory if the secure mode is enabled.
@@ -551,7 +554,8 @@ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* imp
   }
 
   memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord));
-  return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
+  *dst = new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
+  return kErrorOk;
 }
 
 static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
@@ -798,12 +802,9 @@ Error JitAllocator::alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcep
       if (ASMJIT_UNLIKELY(!blockSize))
         return DebugUtils::errored(kErrorOutOfMemory);
 
-      block = JitAllocatorImpl_newBlock(impl, pool, blockSize);
+      ASMJIT_PROPAGATE(JitAllocatorImpl_newBlock(impl, &block, pool, blockSize));
       areaIndex = 0;
 
-      if (ASMJIT_UNLIKELY(!block))
-        return DebugUtils::errored(kErrorOutOfMemory);
-
       JitAllocatorImpl_insertBlock(impl, block);
       block->_searchStart = areaSize;
       block->_largestUnusedArea = block->areaSize() - areaSize;
@@ -1018,6 +1019,34 @@ public:
   uint64_t _state[2];
 };
 
+namespace JitAllocatorUtils {
+  static void fillPattern64(void* p_, uint64_t pattern, size_t sizeInBytes) noexcept {
+    uint64_t* p = static_cast<uint64_t*>(p_);
+    size_t n = sizeInBytes / 8u;
+
+    for (size_t i = 0; i < n; i++)
+      p[i] = pattern;
+  }
+
+  static bool verifyPattern64(const void* p_, uint64_t pattern, size_t sizeInBytes) noexcept {
+    const uint64_t* p = static_cast<const uint64_t*>(p_);
+    size_t n = sizeInBytes / 8u;
+
+    for (size_t i = 0; i < n; i++) {
+      if (p[i] != pattern) {
+        INFO("Pattern verification failed at 0x%p [%zu * 8]: value(0x%016llX) != expected(0x%016llX)",
+             p,
+             i,
+             (unsigned long long)p[i],
+             (unsigned long long)pattern);
+        return false;
+      }
+    }
+
+    return true;
+  }
+}
+
 // Helper class to verify that JitAllocator doesn't return addresses that overlap.
 class JitAllocatorWrapper {
 public:
@@ -1035,9 +1064,19 @@ public:
   class Record : public ZoneTreeNodeT<Record>, public Range {
   public:
-    inline Record(uint8_t* addr, size_t size)
+    //! Read/write address, in case this is a dual mapping.
+    void* _rw;
+    //! Describes a pattern used to fill the allocated memory.
+    uint64_t pattern;
+
+    inline Record(void* rx, void* rw, size_t size, uint64_t pattern)
       : ZoneTreeNodeT(),
-        Range(addr, size) {}
+        Range(static_cast<uint8_t*>(rx), size),
+        _rw(rw),
+        pattern(pattern) {}
+
+    inline void* rx() const noexcept { return addr; }
+    inline void* rw() const noexcept { return _rw; }
 
     inline bool operator<(const Record& other) const noexcept { return addr < other.addr; }
     inline bool operator>(const Record& other) const noexcept { return addr > other.addr; }
@@ -1050,14 +1089,16 @@ public:
   ZoneAllocator _heap;
   ZoneTree<Record> _records;
   JitAllocator _allocator;
+  Random _rng;
 
   explicit JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept
     : _zone(1024 * 1024),
       _heap(&_zone),
-      _allocator(params) {}
+      _allocator(params),
+      _rng(0x123456789u) {}
 
-  void _insert(void* p_, size_t size) noexcept {
-    uint8_t* p = static_cast<uint8_t*>(p_);
+  void _insert(void* pRX, void* pRW, size_t size) noexcept {
+    uint8_t* p = static_cast<uint8_t*>(pRX);
     uint8_t* pEnd = p + size - 1;
     Record* record;
 
@@ -1070,9 +1111,18 @@ public:
     if (record)
       EXPECT(record == nullptr,
             "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);
 
-    record = _heap.newT<Record>(p, size);
+    uint64_t pattern = _rng.nextUInt64();
+    record = _heap.newT<Record>(pRX, pRW, size, pattern);
     EXPECT(record != nullptr, "Out of memory, cannot allocate 'Record'");
 
+    {
+      VirtMem::ProtectJitReadWriteScope scope(pRW, size);
+      JitAllocatorUtils::fillPattern64(pRW, pattern, size);
+    }
+
+    VirtMem::flushInstructionCache(pRX, size);
+    EXPECT(JitAllocatorUtils::verifyPattern64(pRX, pattern, size) == true);
+
     _records.insert(record);
   }
 
@@ -1080,6 +1130,9 @@ public:
     Record* record = _records.get(static_cast<uint8_t*>(p));
     EXPECT(record != nullptr, "Address [%p] doesn't exist\n", p);
 
+    EXPECT(JitAllocatorUtils::verifyPattern64(record->rx(), record->pattern, record->size) == true);
+    EXPECT(JitAllocatorUtils::verifyPattern64(record->rw(), record->pattern, record->size) == true);
+
     _records.remove(record);
     _heap.release(record, sizeof(Record));
   }
@@ -1091,7 +1144,7 @@ public:
     Error err = _allocator.alloc(&rxPtr, &rwPtr, size);
     EXPECT(err == kErrorOk, "JitAllocator failed to allocate %zu bytes\n", size);
 
-    _insert(rxPtr, size);
+    _insert(rxPtr, rwPtr, size);
     return rxPtr;
   }
@@ -1156,7 +1209,7 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep
 }
 
 static void test_jit_allocator_alloc_release() noexcept {
-  size_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 100000;
+  size_t kCount = BrokenAPI::hasArg("--quick") ? 20000 : 100000;
 
   struct TestParams {
     const char* name;
@@ -1316,8 +1369,8 @@ UNIT(jit_allocator) {
   test_jit_allocator_alloc_release();
   test_jit_allocator_query();
 }
-#endif
+#endif // ASMJIT_TEST
 
 ASMJIT_END_NAMESPACE
 
-#endif
+#endif // !ASMJIT_NO_JIT
diff --git a/src/asmjit/core/osutils.cpp b/src/asmjit/core/osutils.cpp
index fa900bf..18aa2a0 100644
--- a/src/asmjit/core/osutils.cpp
+++ b/src/asmjit/core/osutils.cpp
@@ -76,7 +76,7 @@ uint32_t OSUtils::getTickCount() noexcept {
   uint64_t t = (uint64_t(ts.tv_sec ) * 1000u) + (uint64_t(ts.tv_nsec) / 1000000u);
   return uint32_t(t & 0xFFFFFFFFu);
 #else
-  #pragma message("asmjit::OSUtils::getTickCount() doesn't have implementation for the target OS.")
+  #pragma message("[asmjit] OSUtils::getTickCount() doesn't have implementation for the target OS.")
   return 0;
 #endif
 }
diff --git a/src/asmjit/core/virtmem.cpp b/src/asmjit/core/virtmem.cpp
index e26652a..b215e7b 100644
--- a/src/asmjit/core/virtmem.cpp
+++ b/src/asmjit/core/virtmem.cpp
@@ -43,7 +43,7 @@
     #define MAP_ANONYMOUS MAP_ANON
   #endif
 
-  #define ASMJIT_DUAL_MAPPING_ANON_FD
+  #define ASMJIT_ANONYMOUS_MEMORY_USE_FD
 
   #if defined(__APPLE__) || defined(__BIONIC__)
     #define ASMJIT_VM_SHM_DETECT 0
@@ -61,8 +61,8 @@
   #endif
 
   #if defined(__NetBSD__) && defined(MAP_REMAPDUP) && defined(PROT_MPROTECT)
-    #undef ASMJIT_DUAL_MAPPING_ANON_FD
-    #define ASMJIT_DUAL_MAPPING_REMAPDUP
+    #undef ASMJIT_ANONYMOUS_MEMORY_USE_FD
+    #define ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP
   #endif
 #endif
 
@@ -73,6 +73,7 @@ ASMJIT_BEGIN_SUB_NAMESPACE(VirtMem)
 // Virtual Memory Utilities
 // ========================
 
+ASMJIT_MAYBE_UNUSED
 static const constexpr MemoryFlags dualMappingFilter[2] = {
   MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite,
   MemoryFlags::kAccessExecute | MemoryFlags::kMMapMaxAccessExecute
@@ -286,13 +287,13 @@ static int mmProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
 // Virtual Memory [Posix] - Anonymous Memory
 // =========================================
 
-#if defined(ASMJIT_DUAL_MAPPING_ANON_FD)
+#if defined(ASMJIT_ANONYMOUS_MEMORY_USE_FD)
 
 // Some operating systems don't allow /dev/shm to be executable. On Linux this happens when /dev/shm is mounted with
 // 'noexec', which is enforced by systemd. Other operating systems like MacOS also restrict executable permissions
 // regarding /dev/shm, so we use a runtime detection before attempting to allocate executable memory. Sometimes we
-// don't need the detection as we know it would always result in `AnonymousMemStrategy::kTmpDir`.
-enum class AnonymousMemStrategy : uint32_t {
+// don't need the detection as we know it would always result in `AnonymousMemoryStrategy::kTmpDir`.
+enum class AnonymousMemoryStrategy : uint32_t {
   kUnknown = 0,
   kDevShm = 1,
   kTmpDir = 2
 };
@@ -333,9 +334,13 @@ public:
 #if defined(__linux__) && defined(__NR_memfd_create)
     // Linux specific 'memfd_create' - if the syscall returns `ENOSYS` it means
    // it's not available and we will never call it again (would be pointless).
+    //
+    // NOTE: There is also a memfd_create() libc function on FreeBSD, but it internally
+    // uses `shm_open(SHM_ANON, ...)`, so there is no need to add support for it (it's
+    // not a syscall as on Linux).
 
     // Zero initialized, if ever changed to '1' that would mean the syscall is not
-    // available and we must use `shm_open()` and `shm_unlink()`.
+    // available and we must use `shm_open()` and `shm_unlink()` (or regular `open()`).
     static volatile uint32_t memfd_create_not_supported;
 
     if (!memfd_create_not_supported) {
@@ -440,7 +445,7 @@ public:
 };
 
 #if ASMJIT_VM_SHM_DETECT
-static Error detectAnonMemStrategy(AnonymousMemStrategy* strategyOut) noexcept {
+static Error detectAnonymousMemoryStrategy(AnonymousMemoryStrategy* strategyOut) noexcept {
   AnonymousMemory anonMem;
   Info vmInfo = info();
 
@@ -451,39 +456,39 @@ static Error detectAnonMemStrategy(AnonymousMemStrategy* strategyOut) noexcept {
   if (ptr == MAP_FAILED) {
     int e = errno;
     if (e == EINVAL) {
-      *strategyOut = AnonymousMemStrategy::kTmpDir;
+      *strategyOut = AnonymousMemoryStrategy::kTmpDir;
       return kErrorOk;
     }
     return DebugUtils::errored(asmjitErrorFromErrno(e));
   }
   else {
     munmap(ptr, vmInfo.pageSize);
-    *strategyOut = AnonymousMemStrategy::kDevShm;
+    *strategyOut = AnonymousMemoryStrategy::kDevShm;
     return kErrorOk;
   }
 }
 #endif
 
-static Error getAnonMemStrategy(AnonymousMemStrategy* strategyOut) noexcept {
+static Error getAnonymousMemoryStrategy(AnonymousMemoryStrategy* strategyOut) noexcept {
 #if ASMJIT_VM_SHM_DETECT
   // Initially don't assume anything. It has to be tested whether '/dev/shm' was mounted with 'noexec' flag or not.
-  static std::atomic<uint32_t> globalShmStrategy;
+  static std::atomic<uint32_t> globalStrategy;
 
-  AnonymousMemStrategy strategy = static_cast<AnonymousMemStrategy>(globalShmStrategy.load());
-  if (strategy == AnonymousMemStrategy::kUnknown) {
-    ASMJIT_PROPAGATE(detectAnonMemStrategy(&strategy));
-    globalShmStrategy.store(static_cast<uint32_t>(strategy));
+  AnonymousMemoryStrategy strategy = static_cast<AnonymousMemoryStrategy>(globalStrategy.load());
+  if (strategy == AnonymousMemoryStrategy::kUnknown) {
+    ASMJIT_PROPAGATE(detectAnonymousMemoryStrategy(&strategy));
+    globalStrategy.store(static_cast<uint32_t>(strategy));
   }
 
   *strategyOut = strategy;
   return kErrorOk;
 #else
-  *strategyOut = AnonymousMemStrategy::kTmpDir;
+  *strategyOut = AnonymousMemoryStrategy::kTmpDir;
   return kErrorOk;
 #endif
 }
-#endif // ASMJIT_DUAL_MAPPING_ANON_FD
+#endif // ASMJIT_ANONYMOUS_MEMORY_USE_FD
 
 // Virtual Memory [Posix] - Hardened Runtime & MAP_JIT
 // ===================================================
@@ -554,19 +559,37 @@ static inline int mmMapJitFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
 #endif
 }
 
-// Returns BSD-specific `PROT_MAX()` flags.
-static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
-#if defined(PROT_MAX)
+ASMJIT_MAYBE_UNUSED
+static MemoryFlags maxAccessFlagsToRegularAccessFlags(MemoryFlags memoryFlags) noexcept {
   static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
+  return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kMMapMaxAccessRWX) >> kMaxProtShift);
+}
 
-  if (Support::test(memoryFlags, MemoryFlags::kMMapMaxAccessReadWrite | MemoryFlags::kMMapMaxAccessExecute))
-    return PROT_MAX(mmProtFromMemoryFlags((MemoryFlags)(uint32_t(memoryFlags) >> kMaxProtShift)));
-  else
-    return 0;
+ASMJIT_MAYBE_UNUSED
+static MemoryFlags regularAccessFlagsToMaxAccessFlags(MemoryFlags memoryFlags) noexcept {
+  static constexpr uint32_t kMaxProtShift = Support::ConstCTZ<uint32_t(MemoryFlags::kMMapMaxAccessRead)>::value;
+  return MemoryFlags(uint32_t(memoryFlags & MemoryFlags::kAccessRWX) << kMaxProtShift);
+}
+
+// Returns maximum protection flags from `memoryFlags`.
+//
+// Uses:
+//   - `PROT_MPROTECT()` on NetBSD.
+//   - `PROT_MAX()` when available (BSD).
+ASMJIT_MAYBE_UNUSED
+static inline int mmMaxProtFromMemoryFlags(MemoryFlags memoryFlags) noexcept {
+  MemoryFlags acc = maxAccessFlagsToRegularAccessFlags(memoryFlags);
+  if (acc != MemoryFlags::kNone) {
+#if defined(__NetBSD__) && defined(PROT_MPROTECT)
+    return PROT_MPROTECT(mmProtFromMemoryFlags(acc));
+#elif defined(PROT_MAX)
+    return PROT_MAX(mmProtFromMemoryFlags(acc));
 #else
-  DebugUtils::unused(memoryFlags);
-  return 0;
+    return 0;
 #endif
+  }
+
+  return 0;
 }
 
 static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
@@ -581,75 +604,89 @@ static HardenedRuntimeFlags getHardenedRuntimeFlags() noexcept {
   return flags;
 }
 
-Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
+static Error mapMemory(void** p, size_t size, MemoryFlags memoryFlags, int fd = -1, off_t offset = 0) noexcept {
   *p = nullptr;
   if (size == 0)
     return DebugUtils::errored(kErrorInvalidArgument);
 
   int protection = mmProtFromMemoryFlags(memoryFlags) | mmMaxProtFromMemoryFlags(memoryFlags);
-  int mmFlags = MAP_PRIVATE | MAP_ANONYMOUS | mmMapJitFromMemoryFlags(memoryFlags);
+  int mmFlags = mmMapJitFromMemoryFlags(memoryFlags);
 
-  void* ptr = mmap(nullptr, size, protection, mmFlags, -1, 0);
+  mmFlags |= Support::test(memoryFlags, MemoryFlags::kMapShared) ? MAP_SHARED : MAP_PRIVATE;
+  if (fd == -1)
+    mmFlags |= MAP_ANONYMOUS;
+
+  void* ptr = mmap(nullptr, size, protection, mmFlags, fd, offset);
   if (ptr == MAP_FAILED)
-    return DebugUtils::errored(kErrorOutOfMemory);
+    return DebugUtils::errored(asmjitErrorFromErrno(errno));
 
   *p = ptr;
   return kErrorOk;
 }
 
-Error release(void* p, size_t size) noexcept {
+static Error unmapMemory(void* p, size_t size) noexcept {
   if (ASMJIT_UNLIKELY(munmap(p, size) != 0))
-    return DebugUtils::errored(kErrorInvalidArgument);
+    return DebugUtils::errored(asmjitErrorFromErrno(errno));
 
   return kErrorOk;
 }
 
+Error alloc(void** p, size_t size, MemoryFlags memoryFlags) noexcept {
+  return mapMemory(p, size, memoryFlags);
+}
+
+Error release(void* p, size_t size) noexcept {
+  return unmapMemory(p, size);
+}
 
 Error protect(void* p, size_t size, MemoryFlags memoryFlags) noexcept {
   int protection = mmProtFromMemoryFlags(memoryFlags);
   if (mprotect(p, size, protection) == 0)
     return kErrorOk;
 
-  return DebugUtils::errored(kErrorInvalidArgument);
+  return DebugUtils::errored(asmjitErrorFromErrno(errno));
 }
 
 // Virtual Memory [Posix] - Dual Mapping
 // =====================================
 
-#if defined(ASMJIT_DUAL_MAPPING_REMAPDUP)
-static void unmapDualMapping(DualMapping* dm, size_t size) noexcept {
-  if (dm->rw)
-    munmap(dm->rw, size);
+static Error unmapDualMapping(DualMapping* dm, size_t size) noexcept {
+  Error err1 = unmapMemory(dm->rx, size);
+  Error err2 = kErrorOk;
 
-  if (dm->rx)
-    munmap(dm->rx, size);
+  if (dm->rx != dm->rw)
+    err2 = unmapMemory(dm->rw, size);
+
+  // We can report only one error, so report the first...
+  if (err1 || err2)
+    return DebugUtils::errored(err1 ? err1 : err2);
+
+  dm->rx = nullptr;
+  dm->rw = nullptr;
+  return kErrorOk;
 }
 
+#if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP)
 static Error allocDualMappingUsingRemapdup(DualMapping* dmOut, size_t size, MemoryFlags memoryFlags) noexcept {
+  MemoryFlags maxAccessFlags = regularAccessFlagsToMaxAccessFlags(memoryFlags);
+  MemoryFlags finalFlags = memoryFlags | maxAccessFlags | MemoryFlags::kMapShared;
+
+  MemoryFlags rxFlags = finalFlags & ~(MemoryFlags::kAccessWrite | MemoryFlags::kMMapMaxAccessWrite);
+  MemoryFlags rwFlags = finalFlags & ~(MemoryFlags::kAccessExecute);
+
+  // Allocate RW mapping.
   DualMapping dm {};
+  ASMJIT_PROPAGATE(mapMemory(&dm.rw, size, rwFlags));
 
-  dm.rw = mmap(NULL, size, PROT_MPROTECT(mmProtFromMemoryFlags(memoryFlags)), MAP_ANONYMOUS, -1, 0);
-  if (dm.rw == MAP_FAILED) {
-    return DebugUtils::errored(asmjitErrorFromErrno(errno));
-  }
-
-  dm.rx = mremap(dm.rw, size, NULL, size, MAP_REMAPDUP);
+  // Allocate RX mapping.
+  dm.rx = mremap(dm.rw, size, nullptr, size, MAP_REMAPDUP);
   if (dm.rx == MAP_FAILED) {
     int e = errno;
-    unmapDualMapping(&dm, size);
+    munmap(dm.rw, size);
     return DebugUtils::errored(asmjitErrorFromErrno(e));
   }
 
-  MemoryFlags rxAccessFlags = memoryFlags & ~dualMappingFilter[0];
-  MemoryFlags rwAccessFlags = memoryFlags & ~dualMappingFilter[1];
-
-  if (mprotect(dm.rw, size, mmProtFromMemoryFlags(rwAccessFlags)) != 0) {
-    int e = errno;
-    unmapDualMapping(&dm, size);
-    return DebugUtils::errored(asmjitErrorFromErrno(e));
-  }
-
-  if (mprotect(dm.rx, size, mmProtFromMemoryFlags(rxAccessFlags)) != 0) {
+  if (mprotect(dm.rx, size, mmProtFromMemoryFlags(rxFlags)) != 0) {
     int e = errno;
     unmapDualMapping(&dm, size);
     return DebugUtils::errored(asmjitErrorFromErrno(e));
   }
@@ -667,14 +704,14 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
   if (off_t(size) <= 0)
     return DebugUtils::errored(size == 0 ? kErrorInvalidArgument : kErrorTooLarge);
 
-#if defined(ASMJIT_DUAL_MAPPING_REMAPDUP)
+#if defined(ASMJIT_ANONYMOUS_MEMORY_USE_REMAPDUP)
   return allocDualMappingUsingRemapdup(dm, size, memoryFlags);
-#elif defined(ASMJIT_DUAL_MAPPING_ANON_FD)
+#elif defined(ASMJIT_ANONYMOUS_MEMORY_USE_FD)
   bool preferTmpOverDevShm = Support::test(memoryFlags, MemoryFlags::kMappingPreferTmp);
   if (!preferTmpOverDevShm) {
-    AnonymousMemStrategy strategy;
-    ASMJIT_PROPAGATE(getAnonMemStrategy(&strategy));
-    preferTmpOverDevShm = (strategy == AnonymousMemStrategy::kTmpDir);
+    AnonymousMemoryStrategy strategy;
+    ASMJIT_PROPAGATE(getAnonymousMemoryStrategy(&strategy));
+    preferTmpOverDevShm = (strategy == AnonymousMemoryStrategy::kTmpDir);
   }
 
   AnonymousMemory anonMem;
@@ -683,16 +720,12 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
 
   void* ptr[2];
   for (uint32_t i = 0; i < 2; i++) {
-    MemoryFlags accessFlags = memoryFlags & ~dualMappingFilter[i];
-    int protection = mmProtFromMemoryFlags(accessFlags) | mmMaxProtFromMemoryFlags(accessFlags);
-
-    ptr[i] = mmap(nullptr, size, protection, MAP_SHARED, anonMem.fd(), 0);
-    if (ptr[i] == MAP_FAILED) {
-      // Get the error now before `munmap()` has a chance to clobber it.
-      int e = errno;
+    MemoryFlags restrictedMemoryFlags = memoryFlags & ~dualMappingFilter[i];
+    Error err = mapMemory(&ptr[i], size, restrictedMemoryFlags | MemoryFlags::kMapShared, anonMem.fd(), 0);
+    if (err != kErrorOk) {
       if (i == 1)
-        munmap(ptr[0], size);
-      return DebugUtils::errored(asmjitErrorFromErrno(e));
+        unmapMemory(ptr[0], size);
+      return err;
     }
   }
 
@@ -705,16 +738,7 @@ Error allocDualMapping(DualMapping* dm, size_t size, MemoryFlags memoryFlags) no
 }
 
 Error releaseDualMapping(DualMapping* dm, size_t size) noexcept {
-  Error err = release(dm->rx, size);
-  if (dm->rx != dm->rw)
-    err |= release(dm->rw, size);
-
-  if (err)
-    return DebugUtils::errored(kErrorInvalidArgument);
-
-  dm->rx = nullptr;
-  dm->rw = nullptr;
-  return kErrorOk;
+  return unmapDualMapping(dm, size);
 }
 #endif
 
@@ -735,7 +759,7 @@ void flushInstructionCache(void* p, size_t size) noexcept {
   char* end = start + size;
   __builtin___clear_cache(start, end);
 #else
-  #pragma message("asmjit::VirtMem::flushInstructionCache() doesn't have implementation for the target OS and compiler")
+  #pragma message("[asmjit] VirtMem::flushInstructionCache() doesn't have implementation for the target OS and compiler")
   DebugUtils::unused(p, size);
 #endif
 }
@@ -770,7 +794,7 @@ HardenedRuntimeInfo hardenedRuntimeInfo() noexcept {
 
 void protectJitMemory(ProtectJitAccess access) noexcept {
 #if defined(ASMJIT_HAS_PTHREAD_JIT_WRITE_PROTECT_NP)
-  pthread_jit_write_protect_np(static_cast<uint32_t>(access));
+  pthread_jit_write_protect_np(static_cast<int>(access));
 #else
   DebugUtils::unused(access);
 #endif
@@ -778,4 +802,29 @@ void protectJitMemory(ProtectJitAccess access) noexcept {
 
 ASMJIT_END_SUB_NAMESPACE
 
-#endif
+// VirtMem - Tests
+// ===============
+
+#if defined(ASMJIT_TEST)
+ASMJIT_BEGIN_NAMESPACE
+
+UNIT(virt_mem) {
+  VirtMem::Info vmInfo = VirtMem::info();
+
+  INFO("VirtMem::info():");
+  INFO("  pageSize: %zu", size_t(vmInfo.pageSize));
+  INFO("  pageGranularity: %zu", size_t(vmInfo.pageGranularity));
+
+  VirtMem::HardenedRuntimeInfo hardenedRtInfo = VirtMem::hardenedRuntimeInfo();
+  VirtMem::HardenedRuntimeFlags hardenedFlags = hardenedRtInfo.flags;
+
+  INFO("VirtMem::hardenedRuntimeInfo():");
+  INFO("  flags:");
+  INFO("    kEnabled: %s", Support::test(hardenedFlags, VirtMem::HardenedRuntimeFlags::kEnabled) ? "true" : "false");
+  INFO("    kMapJit: %s", Support::test(hardenedFlags, VirtMem::HardenedRuntimeFlags::kMapJit) ? "true" : "false");
+}
+
+ASMJIT_END_NAMESPACE
+#endif // ASMJIT_TEST
+
+#endif // !ASMJIT_NO_JIT
diff --git a/src/asmjit/core/virtmem.h b/src/asmjit/core/virtmem.h
index 3118f0b..a5633a2 100644
--- a/src/asmjit/core/virtmem.h
+++ b/src/asmjit/core/virtmem.h
@@ -96,6 +96,13 @@ enum class MemoryFlags : uint32_t {
   //! MemoryFlags::kMMapMaxAccessExecute.
   kMMapMaxAccessRWX = kMMapMaxAccessRead | kMMapMaxAccessWrite | kMMapMaxAccessExecute,
 
+  //! Use `MAP_SHARED` when calling mmap().
+  //!
+  //! \note In some cases `MAP_SHARED` may be set automatically. For example when using dual mapping it's important
+  //! to use `MAP_SHARED` instead of `MAP_PRIVATE` to ensure that the OS would not copy pages on write (that would
+  //! mean updating only the RW mapped region and not the RX mapped one).
+  kMapShared = 0x00000100u,
+
   //! Not an access flag, only used by `allocDualMapping()` to override the default allocation strategy to always use
   //! a 'tmp' directory instead of "/dev/shm" (on POSIX platforms). Please note that this flag will be ignored if the
   //! operating system allows allocating executable memory by a different API than `open()` or `shm_open()`. For
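
Usage sketch (reviewer note, not part of the patch): the test wrapper above already exercises the dual-mapping contract that `kMapShared` preserves — code is written through the RW alias and executed through the RX alias. A minimal caller using the public API touched by this patch could look like the following; error handling is elided, `emit` is a hypothetical helper, and the bytes passed in are assumed to be valid machine code for the host:

  #include <asmjit/core.h>
  #include <cstring>

  using namespace asmjit;

  // Allocates one span of JIT memory. `rx` and `rw` may point to the same
  // pages, or be two mappings of the same physical memory (dual mapping).
  static void* emit(JitAllocator& allocator, const void* code, size_t size) {
    void* rx;
    void* rw;
    if (allocator.alloc(&rx, &rw, size) != kErrorOk)
      return nullptr;

    {
      // On hardened runtimes (MAP_JIT) the calling thread must be switched
      // to RW mode before writing; the scope restores RX mode on exit.
      VirtMem::ProtectJitReadWriteScope scope(rw, size);
      memcpy(rw, code, size);
    }

    // Writes went through `rw`; make them visible to instruction fetch.
    VirtMem::flushInstructionCache(rx, size);
    return rx; // Execute through the RX alias only.
  }

With `MAP_PRIVATE` instead of `kMapShared`/`MAP_SHARED`, the write into `rw` could trigger copy-on-write and leave the RX mapping unchanged, which is exactly the failure mode the new flag's documentation describes.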