summaryrefslogtreecommitdiff
path: root/rts/Linker.c
diff options
context:
space:
mode:
authorBen Gamari <ben@smart-cactus.org>2019-10-28 00:34:09 -0400
committerMarge Bot <ben+marge-bot@smart-cactus.org>2019-11-04 03:41:44 -0500
commit120f2e5343d5ccd3ad117d530018b75302c6482b (patch)
tree66b3e392325172021ef79518a0027de2962a2f84 /rts/Linker.c
parent5d4f16eed151caddf4624ff0a1fc23d5a4475957 (diff)
downloadhaskell-120f2e5343d5ccd3ad117d530018b75302c6482b.tar.gz
rts/linker: Ensure that code isn't writable
For many years the linker would simply map all of its memory with PROT_READ|PROT_WRITE|PROT_EXEC. However operating systems have been becoming increasingly reluctant to accept this practice (e.g. #17353 and #12657) and for good reason: writable code is ripe for exploitation. Consequently mmapForLinker now maps its memory with PROT_READ|PROT_WRITE. After the linker has finished filling/relocating the mapping it must then call mmapForLinkerMarkExecutable on the sections of the mapping which contain executable code. Moreover, to make all of this possible it was necessary to redesign the m32 allocator. First, we gave (in an earlier commit) each ObjectCode its own m32_allocator. This was necessary since code loading and symbol resolution/relocation are currently interleaved, meaning that it is not possible to enforce W^X when symbols from different objects reside in the same page. We then redesigned the m32 allocator to take advantage of the fact that all of the pages allocated with the allocator die at the same time (namely, when the owning ObjectCode is unloaded). This makes a number of things simpler (e.g. no more page reference counting; the interface provided by the allocator for freeing is simpler). See Note [M32 Allocator] for details.
Diffstat (limited to 'rts/Linker.c')
-rw-r--r--rts/Linker.c61
1 files changed, 54 insertions, 7 deletions
diff --git a/rts/Linker.c b/rts/Linker.c
index 544e7675f0..7bd2e67278 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -1020,7 +1020,7 @@ mmap_again:
map_addr = mmap_32bit_base;
}
- const int prot = PROT_READ | PROT_WRITE | PROT_EXEC;
+ const int prot = PROT_READ | PROT_WRITE;
IF_DEBUG(linker,
debugBelch("mmapForLinker: \tprotection %#0x\n", prot));
IF_DEBUG(linker,
@@ -1091,6 +1091,40 @@ mmap_again:
return result;
}
+
+/* Note [Memory protection in the linker]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * For many years the linker would simply map all of its memory
+ * with PROT_READ|PROT_WRITE|PROT_EXEC. However operating systems have been
+ * becoming increasingly reluctant to accept this practice (e.g. #17353,
+ * #12657) and for good reason: writable code is ripe for exploitation.
+ *
+ * Consequently mmapForLinker now maps its memory with PROT_READ|PROT_WRITE.
+ * After the linker has finished filling/relocating the mapping it must then
+ * call mmapForLinkerMarkExecutable on the sections of the mapping which
+ * contain executable code.
+ *
+ * Note that the m32 allocator handles protection of its allocations. For this
+ * reason the caller of m32_alloc() must tell the allocator whether the
+ * allocation needs to be executable. The caller must then ensure that they
+ * call m32_flush() after they are finished filling the region, which will
+ * cause the allocator to change the protection bits to PROT_READ|PROT_EXEC.
+ *
+ */
+
+/*
+ * Mark a portion of a mapping previously reserved by mmapForLinker
+ * as executable (but not writable). Aborts via barf() if mprotect fails.
+ */
+void mmapForLinkerMarkExecutable(void *start, size_t len)
+{
+ IF_DEBUG(linker,
+ debugBelch("mmapForLinkerMarkExecutable: protecting %" FMT_Word
+ " bytes starting at %p\n", (W_)len, start));
+ if (mprotect(start, len, PROT_READ|PROT_EXEC) == -1) {
+ barf("mmapForLinkerMarkExecutable: mprotect: %s\n", strerror(errno));
+ }
+}
#endif
/*
@@ -1184,8 +1218,7 @@ void freeObjectCode (ObjectCode *oc)
IF_DEBUG(zero_on_gc,
memset(oc->sections[i].start,
0x00, oc->sections[i].size));
- m32_free(oc->sections[i].start,
- oc->sections[i].size);
+ // Freed by m32_allocator_free
break;
#endif
case SECTION_MALLOC:
@@ -1215,7 +1248,7 @@ void freeObjectCode (ObjectCode *oc)
if (RTS_LINKER_USE_MMAP) {
if (!USE_CONTIGUOUS_MMAP && !RtsFlags.MiscFlags.linkerAlwaysPic &&
oc->symbol_extras != NULL) {
- m32_free(oc->symbol_extras, sizeof(SymbolExtra) * oc->n_symbol_extras);
+ // Freed by m32_allocator_free
}
}
else {
@@ -1230,7 +1263,11 @@ void freeObjectCode (ObjectCode *oc)
ocDeinit_ELF(oc);
#endif
- m32_allocator_free(oc->m32);
+#if RTS_LINKER_USE_MMAP == 1
+ m32_allocator_free(oc->rx_m32);
+ m32_allocator_free(oc->rw_m32);
+#endif
+
stgFree(oc->fileName);
stgFree(oc->archiveMemberName);
@@ -1310,7 +1347,8 @@ mkOc( pathchar *path, char *image, int imageSize,
oc->next = NULL;
#if RTS_LINKER_USE_MMAP
- oc->m32 = m32_allocator_new();
+ oc->rw_m32 = m32_allocator_new(false);
+ oc->rx_m32 = m32_allocator_new(true);
#endif
IF_DEBUG(linker, debugBelch("mkOc: done\n"));
@@ -1631,7 +1669,16 @@ int ocTryLoad (ObjectCode* oc) {
# endif
if (!r) { return r; }
- m32_allocator_flush(oc->m32);
+#if defined(NEED_SYMBOL_EXTRAS)
+ ocProtectExtras(oc);
+#endif
+
+ // We have finished loading and relocating; flush the m32 allocators to
+ // setup page protections.
+#if RTS_LINKER_USE_MMAP
+ m32_allocator_flush(oc->rx_m32);
+ m32_allocator_flush(oc->rw_m32);
+#endif
// run init/init_array/ctors/mod_init_func