Diffstat (limited to 'erts/emulator/asmjit/core/jitallocator.cpp')
-rw-r--r--  erts/emulator/asmjit/core/jitallocator.cpp  176
1 file changed, 155 insertions(+), 21 deletions(-)
diff --git a/erts/emulator/asmjit/core/jitallocator.cpp b/erts/emulator/asmjit/core/jitallocator.cpp
index 19fbe4b233..2a4305f069 100644
--- a/erts/emulator/asmjit/core/jitallocator.cpp
+++ b/erts/emulator/asmjit/core/jitallocator.cpp
@@ -430,6 +430,15 @@ static inline JitAllocatorPrivateImpl* JitAllocatorImpl_new(const JitAllocator::
if (ASMJIT_UNLIKELY(!p))
return nullptr;
+ VirtMem::HardenedRuntimeInfo hardenedRtInfo = VirtMem::hardenedRuntimeInfo();
+ if (Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kEnabled)) {
+ // If we are running within a hardened environment (mapping RWX is not allowed) then we have to use dual mapping
+ // or other runtime capabilities such as Apple-specific MAP_JIT. There is no point in leaving these disabled, as
+ // otherwise the allocation would fail and JitAllocator would be unable to allocate memory.
+ if (!Support::test(hardenedRtInfo.flags, VirtMem::HardenedRuntimeFlags::kMapJit))
+ options |= JitAllocatorOptions::kUseDualMapping;
+ }
+
JitAllocatorPool* pools = reinterpret_cast<JitAllocatorPool*>((uint8_t*)p + sizeof(JitAllocatorPrivateImpl));
JitAllocatorPrivateImpl* impl = new(p) JitAllocatorPrivateImpl(pools, poolCount);
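
Note: with this hunk, embedders on hardened systems no longer have to opt into dual mapping themselves, because JitAllocatorImpl_new() now enables it automatically. For illustration, the equivalent manual decision before this patch would have looked roughly like the sketch below (the API calls are asmjit's public ones already used in this file; the `params`/`allocator` names are made up):

  // Sketch: what an embedder previously had to decide by hand.
  JitAllocator::CreateParams params {};
  VirtMem::HardenedRuntimeInfo rt = VirtMem::hardenedRuntimeInfo();

  if (Support::test(rt.flags, VirtMem::HardenedRuntimeFlags::kEnabled) &&
      !Support::test(rt.flags, VirtMem::HardenedRuntimeFlags::kMapJit)) {
    // RWX pages are refused and MAP_JIT is unavailable, so separate RX/RW
    // views of each block are the only way to write generated code.
    params.options |= JitAllocatorOptions::kUseDualMapping;
  }

  JitAllocator allocator(&params);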
@@ -497,7 +506,7 @@ ASMJIT_FAVOR_SPEED static void JitAllocatorImpl_fillPattern(void* mem, uint32_t
//
// NOTE: The block doesn't have the `kFlagEmpty` flag set, because the new block
// is only allocated when it's actually needed, so it would be cleared anyway.
-static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorPool* pool, size_t blockSize) noexcept {
+static Error JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock** dst, JitAllocatorPool* pool, size_t blockSize) noexcept {
using Support::BitWord;
using Support::kBitWordSizeInBits;
@@ -532,7 +541,10 @@ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* imp
if (block)
::free(block);
- return nullptr;
+ if (err)
+ return err;
+ else
+ return kErrorOutOfMemory;
}
// Fill the memory if the secure mode is enabled.
@@ -542,7 +554,8 @@ static JitAllocatorBlock* JitAllocatorImpl_newBlock(JitAllocatorPrivateImpl* imp
}
memset(bitWords, 0, size_t(numBitWords) * 2 * sizeof(BitWord));
- return new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
+ *dst = new(block) JitAllocatorBlock(pool, virtMem, blockSize, blockFlags, bitWords, bitWords + numBitWords, areaSize);
+ return kErrorOk;
}
static void JitAllocatorImpl_deleteBlock(JitAllocatorPrivateImpl* impl, JitAllocatorBlock* block) noexcept {
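
Note: the JitAllocatorImpl_newBlock() change above replaces the "nullptr means failure" convention with asmjit's usual Error return plus out-parameter, so a concrete VirtMem error is no longer collapsed into kErrorOutOfMemory. The resulting call-site shape, sketched with hypothetical names (`Thing`, `makeThing`, `useThing`):

  // ASMJIT_PROPAGATE returns early from the current function on any
  // non-kErrorOk value, forwarding the callee's exact Error code.
  static Error makeThing(Thing** dst) noexcept;

  static Error useThing() noexcept {
    Thing* thing = nullptr;
    ASMJIT_PROPAGATE(makeThing(&thing));
    // `thing` is guaranteed non-null past this point.
    return kErrorOk;
  }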
@@ -789,12 +802,9 @@ Error JitAllocator::alloc(void** rxPtrOut, void** rwPtrOut, size_t size) noexcep
if (ASMJIT_UNLIKELY(!blockSize))
return DebugUtils::errored(kErrorOutOfMemory);
- block = JitAllocatorImpl_newBlock(impl, pool, blockSize);
+ ASMJIT_PROPAGATE(JitAllocatorImpl_newBlock(impl, &block, pool, blockSize));
areaIndex = 0;
- if (ASMJIT_UNLIKELY(!block))
- return DebugUtils::errored(kErrorOutOfMemory);
-
JitAllocatorImpl_insertBlock(impl, block);
block->_searchStart = areaSize;
block->_largestUnusedArea = block->areaSize() - areaSize;
@@ -889,8 +899,12 @@ Error JitAllocator::shrink(void* rxPtr, size_t newSize) noexcept {
// The first bit representing the allocated area and its size.
uint32_t areaStart = uint32_t(offset >> pool->granularityLog2);
- uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
+ bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart);
+ if (ASMJIT_UNLIKELY(!isUsed))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
uint32_t areaPrevSize = areaEnd - areaStart;
uint32_t areaShrunkSize = pool->areaSizeFromByteSize(newSize);
@@ -902,13 +916,55 @@ Error JitAllocator::shrink(void* rxPtr, size_t newSize) noexcept {
block->markShrunkArea(areaStart + areaShrunkSize, areaEnd);
// Fill released memory if the secure mode is enabled.
- if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory))
- JitAllocatorImpl_fillPattern(block->rwPtr() + (areaStart + areaShrunkSize) * pool->granularity, fillPattern(), areaDiff * pool->granularity);
+ if (Support::test(impl->options, JitAllocatorOptions::kFillUnusedMemory)) {
+ uint8_t* spanPtr = block->rwPtr() + (areaStart + areaShrunkSize) * pool->granularity;
+ size_t spanSize = areaDiff * pool->granularity;
+
+ VirtMem::ProtectJitReadWriteScope scope(spanPtr, spanSize);
+ JitAllocatorImpl_fillPattern(spanPtr, fillPattern(), spanSize);
+ }
}
return kErrorOk;
}
+Error JitAllocator::query(void* rxPtr, void** rxPtrOut, void** rwPtrOut, size_t* sizeOut) const noexcept {
+ *rxPtrOut = nullptr;
+ *rwPtrOut = nullptr;
+ *sizeOut = 0u;
+
+ if (ASMJIT_UNLIKELY(_impl == &JitAllocatorImpl_none))
+ return DebugUtils::errored(kErrorNotInitialized);
+
+ JitAllocatorPrivateImpl* impl = static_cast<JitAllocatorPrivateImpl*>(_impl);
+ LockGuard guard(impl->lock);
+ JitAllocatorBlock* block = impl->tree.get(static_cast<uint8_t*>(rxPtr));
+
+ if (ASMJIT_UNLIKELY(!block))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ // Offset relative to the start of the block.
+ JitAllocatorPool* pool = block->pool();
+ size_t offset = (size_t)((uint8_t*)rxPtr - block->rxPtr());
+
+ // The first bit representing the allocated area and its size.
+ uint32_t areaStart = uint32_t(offset >> pool->granularityLog2);
+
+ bool isUsed = Support::bitVectorGetBit(block->_usedBitVector, areaStart);
+ if (ASMJIT_UNLIKELY(!isUsed))
+ return DebugUtils::errored(kErrorInvalidArgument);
+
+ uint32_t areaEnd = uint32_t(Support::bitVectorIndexOf(block->_stopBitVector, areaStart, true)) + 1;
+ size_t byteOffset = pool->byteSizeFromAreaSize(areaStart);
+ size_t byteSize = pool->byteSizeFromAreaSize(areaEnd - areaStart);
+
+ *rxPtrOut = static_cast<uint8_t*>(block->_mapping.rx) + byteOffset;
+ *rwPtrOut = static_cast<uint8_t*>(block->_mapping.rw) + byteOffset;
+ *sizeOut = byteSize;
+
+ return kErrorOk;
+}
+
// JitAllocator - Tests
// ====================
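
Note: together with alloc(), the new query() covers the whole dual-mapping workflow: write machine code through the RW alias, execute through RX, and recover the RW alias later from the RX address alone. A minimal usage sketch (`allocator`, `codeSize` and `encodedCode` are assumed to exist; the VirtMem and JitAllocator calls are the ones this file already uses):

  // Write through the RW view, then execute through the RX view.
  void* rx = nullptr;
  void* rw = nullptr;
  ASMJIT_PROPAGATE(allocator.alloc(&rx, &rw, codeSize));

  {
    // On MAP_JIT targets this flips thread-local write protection; on
    // other targets it is effectively a no-op, so the pattern is portable.
    VirtMem::ProtectJitReadWriteScope scope(rw, codeSize);
    memcpy(rw, encodedCode, codeSize);
  }
  VirtMem::flushInstructionCache(rx, codeSize);

  // Later, with only `rx` at hand, recover the RW alias and the span size
  // (rounded up to the pool's granularity).
  void* rxBase = nullptr;
  void* rwBase = nullptr;
  size_t spanSize = 0;
  ASMJIT_PROPAGATE(allocator.query(rx, &rxBase, &rwBase, &spanSize));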
@@ -963,6 +1019,34 @@ public:
uint64_t _state[2];
};
+namespace JitAllocatorUtils {
+ static void fillPattern64(void* p_, uint64_t pattern, size_t sizeInBytes) noexcept {
+ uint64_t* p = static_cast<uint64_t*>(p_);
+ size_t n = sizeInBytes / 8u;
+
+ for (size_t i = 0; i < n; i++)
+ p[i] = pattern;
+ }
+
+ static bool verifyPattern64(const void* p_, uint64_t pattern, size_t sizeInBytes) noexcept {
+ const uint64_t* p = static_cast<const uint64_t*>(p_);
+ size_t n = sizeInBytes / 8u;
+
+ for (size_t i = 0; i < n; i++) {
+ if (p[i] != pattern) {
+ INFO("Pattern verification failed at 0x%p [%zu * 8]: value(0x%016llX) != expected(0x%016llX)",
+ p,
+ i,
+ (unsigned long long)p[i],
+ (unsigned long long)pattern);
+ return false;
+ }
+ }
+
+ return true;
+ }
+}
+
// Helper class to verify that JitAllocator doesn't return addresses that overlap.
class JitAllocatorWrapper {
public:
@@ -980,9 +1064,19 @@ public:
class Record : public ZoneTreeNodeT<Record>,
public Range {
public:
- inline Record(uint8_t* addr, size_t size)
+ //! Read/write address, in case this is a dual mapping.
+ void* _rw;
+ //! Describes a pattern used to fill the allocated memory.
+ uint64_t pattern;
+
+ inline Record(void* rx, void* rw, size_t size, uint64_t pattern)
: ZoneTreeNodeT<Record>(),
- Range(addr, size) {}
+ Range(static_cast<uint8_t*>(rx), size),
+ _rw(rw),
+ pattern(pattern) {}
+
+ inline void* rx() const noexcept { return addr; }
+ inline void* rw() const noexcept { return _rw; }
inline bool operator<(const Record& other) const noexcept { return addr < other.addr; }
inline bool operator>(const Record& other) const noexcept { return addr > other.addr; }
@@ -995,14 +1089,16 @@ public:
ZoneAllocator _heap;
ZoneTree<Record> _records;
JitAllocator _allocator;
+ Random _rng;
explicit JitAllocatorWrapper(const JitAllocator::CreateParams* params) noexcept
: _zone(1024 * 1024),
_heap(&_zone),
- _allocator(params) {}
+ _allocator(params),
+ _rng(0x123456789u) {}
- void _insert(void* p_, size_t size) noexcept {
- uint8_t* p = static_cast<uint8_t*>(p_);
+ void _insert(void* pRX, void* pRW, size_t size) noexcept {
+ uint8_t* p = static_cast<uint8_t*>(pRX);
uint8_t* pEnd = p + size - 1;
Record* record;
@@ -1015,9 +1111,18 @@ public:
if (record)
EXPECT(record == nullptr, "Address [%p:%p] collides with a newly allocated [%p:%p]\n", record->addr, record->addr + record->size, p, p + size);
- record = _heap.newT<Record>(p, size);
+ uint64_t pattern = _rng.nextUInt64();
+ record = _heap.newT<Record>(pRX, pRW, size, pattern);
EXPECT(record != nullptr, "Out of memory, cannot allocate 'Record'");
+ {
+ VirtMem::ProtectJitReadWriteScope scope(pRW, size);
+ JitAllocatorUtils::fillPattern64(pRW, pattern, size);
+ }
+
+ VirtMem::flushInstructionCache(pRX, size);
+ EXPECT(JitAllocatorUtils::verifyPattern64(pRX, pattern, size) == true);
+
_records.insert(record);
}
@@ -1025,6 +1130,9 @@ public:
Record* record = _records.get(static_cast<uint8_t*>(p));
EXPECT(record != nullptr, "Address [%p] doesn't exist\n", p);
+ EXPECT(JitAllocatorUtils::verifyPattern64(record->rx(), record->pattern, record->size) == true);
+ EXPECT(JitAllocatorUtils::verifyPattern64(record->rw(), record->pattern, record->size) == true);
+
_records.remove(record);
_heap.release(record, sizeof(Record));
}
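
(Note: verifying the pattern through both rx() and rw() is what actually exercises dual mapping here: the two addresses differ, but they alias the same underlying pages, so both views must observe the bytes written through RW.)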
@@ -1036,7 +1144,7 @@ public:
Error err = _allocator.alloc(&rxPtr, &rwPtr, size);
EXPECT(err == kErrorOk, "JitAllocator failed to allocate %zu bytes\n", size);
- _insert(rxPtr, size);
+ _insert(rxPtr, rwPtr, size);
return rxPtr;
}
@@ -1100,8 +1208,8 @@ static void BitVectorRangeIterator_testRandom(Random& rnd, size_t count) noexcep
}
}
-UNIT(jit_allocator) {
- size_t kCount = BrokenAPI::hasArg("--quick") ? 1000 : 100000;
+static void test_jit_allocator_alloc_release() noexcept {
+ size_t kCount = BrokenAPI::hasArg("--quick") ? 20000 : 100000;
struct TestParams {
const char* name;
@@ -1235,8 +1343,34 @@ UNIT(jit_allocator) {
::free(ptrArray);
}
}
-#endif
+
+static void test_jit_allocator_query() noexcept {
+ JitAllocator allocator;
+
+ void* rxPtr = nullptr;
+ void* rwPtr = nullptr;
+ size_t size = 100;
+
+ EXPECT(allocator.alloc(&rxPtr, &rwPtr, size) == kErrorOk);
+ EXPECT(rxPtr != nullptr);
+ EXPECT(rwPtr != nullptr);
+
+ void* rxPtrQueried = nullptr;
+ void* rwPtrQueried = nullptr;
+ size_t sizeQueried;
+
+ EXPECT(allocator.query(rxPtr, &rxPtrQueried, &rwPtrQueried, &sizeQueried) == kErrorOk);
+ EXPECT(rxPtrQueried == rxPtr);
+ EXPECT(rwPtrQueried == rwPtr);
+ EXPECT(sizeQueried == Support::alignUp(size, allocator.granularity()));
+}
+
+UNIT(jit_allocator) {
+ test_jit_allocator_alloc_release();
+ test_jit_allocator_query();
+}
+#endif // ASMJIT_TEST
ASMJIT_END_NAMESPACE
-#endif
+#endif // !ASMJIT_NO_JIT
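
A note on the final expectation in test_jit_allocator_query(): JitAllocator accounts for allocations in granularity-sized areas (64 bytes by default), so the 100-byte request occupies ceil(100 / 64) = 2 areas and query() reports 2 * 64 = 128 bytes, exactly Support::alignUp(100, 64).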