path: root/yjit/src/virtualmem.rs
author    Alan Wu <XrXr@users.noreply.github.com>    2023-02-09 10:34:19 -0500
committer GitHub <noreply@github.com>                2023-02-09 10:34:19 -0500
commit    b78f871d838c168789648738e5c67b071beb8a19 (patch)
tree      168e90e179e6c0f15d97dde22f331844deac059f /yjit/src/virtualmem.rs
parent    970e7cdec30d037c680ab8b36f8e4547159f3495 (diff)
download  ruby-b78f871d838c168789648738e5c67b071beb8a19.tar.gz
YJIT: Use the system page size when the code page size is too small (#7267)
Previously on ARM64 Linux systems that use 64 KiB pages (`CONFIG_ARM64_64K_PAGES=y`), YJIT was panicking on boot due to a failed assertion. The assertion was making sure that code GC can free the last code page that YJIT manages without freeing unrelated memory.

YJIT prefers picking 16 KiB as the granularity at which to free code memory, but when the system can only free at 64 KiB granularity, that is not possible. The fix is to use the system page size as the code page size when the system page size is 64 KiB. Continue to use 16 KiB as the code page size on common systems that use 16/4 KiB pages.

Add asserts to code_gc() and free_page() about code GC's assumptions.

Fixes [Bug #19400]
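To make the selection concrete, here is a minimal sketch of the policy described in the commit message above, assuming a hypothetical helper name and a 16 KiB preferred size; it is not the code added by this commit:

// Hypothetical sketch of the page-size selection; the names here are
// illustrative and not taken from the actual YJIT sources.
fn code_page_size(system_page_size: usize) -> usize {
    // YJIT prefers freeing code memory in 16 KiB chunks, but it cannot free
    // at a finer granularity than the system page size, so on 64 KiB-page
    // kernels the code page size falls back to the system page size.
    const PREFERRED_CODE_PAGE_SIZE: usize = 16 * 1024;
    if system_page_size > PREFERRED_CODE_PAGE_SIZE {
        system_page_size
    } else {
        PREFERRED_CODE_PAGE_SIZE
    }
}

With 4 KiB or 16 KiB system pages this returns 16 KiB; with 64 KiB system pages it returns 64 KiB, matching the behavior described above.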
Diffstat (limited to 'yjit/src/virtualmem.rs')
-rw-r--r--    yjit/src/virtualmem.rs    13
1 file changed, 13 insertions, 0 deletions
diff --git a/yjit/src/virtualmem.rs b/yjit/src/virtualmem.rs
index 1a5b2b1908..33194b09a3 100644
--- a/yjit/src/virtualmem.rs
+++ b/yjit/src/virtualmem.rs
@@ -115,6 +115,12 @@ impl<A: Allocator> VirtualMemory<A> {
         self.region_size_bytes
     }
 
+    /// The granularity at which we can control memory permission.
+    /// On Linux, this is the page size that mmap(2) talks about.
+    pub fn system_page_size(&self) -> usize {
+        self.page_size_bytes
+    }
+
     /// Write a single byte. The first write to a page makes it readable.
     pub fn write_byte(&mut self, write_ptr: CodePtr, byte: u8) -> Result<(), WriteError> {
         let page_size = self.page_size_bytes;
@@ -200,6 +206,13 @@ impl<A: Allocator> VirtualMemory<A> {
     /// Free a range of bytes. start_ptr must be memory page-aligned.
     pub fn free_bytes(&mut self, start_ptr: CodePtr, size: u32) {
        assert_eq!(start_ptr.into_usize() % self.page_size_bytes, 0);
+
+        // Bounds check the request. We should only free memory we manage.
+        let region_range = self.region_start.as_ptr() as *const u8..self.end_ptr().raw_ptr();
+        let last_byte_to_free = start_ptr.add_bytes(size.saturating_sub(1).as_usize()).raw_ptr();
+        assert!(region_range.contains(&start_ptr.raw_ptr()));
+        assert!(region_range.contains(&last_byte_to_free));
+
         self.allocator.mark_unused(start_ptr.0.as_ptr(), size);
     }
 }
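For context, a hedged usage sketch of the new system_page_size() accessor: the helper name and check below are illustrative only, not the asserts this commit adds to code_gc() and free_page(); they assume the VirtualMemory and Allocator items from the file shown in the diff.

// Hypothetical sketch, not part of this commit: how a caller might use the
// new accessor to assert code GC's assumption that only whole system pages
// are handed to free_bytes().
fn assert_freeable<A: Allocator>(virt_mem: &VirtualMemory<A>, code_page_size: usize) {
    // If code pages are not a multiple of the system page size, freeing the
    // last code page could touch memory that YJIT does not manage.
    assert_eq!(code_page_size % virt_mem.system_page_size(), 0);
}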