path: root/rts/linker/Elf.c
author     Ben Gamari <ben@smart-cactus.org>  2019-10-28 00:34:09 -0400
committer  Marge Bot <ben+marge-bot@smart-cactus.org>  2019-11-04 03:41:44 -0500
commit     120f2e5343d5ccd3ad117d530018b75302c6482b (patch)
tree       66b3e392325172021ef79518a0027de2962a2f84 /rts/linker/Elf.c
parent     5d4f16eed151caddf4624ff0a1fc23d5a4475957 (diff)
download   haskell-120f2e5343d5ccd3ad117d530018b75302c6482b.tar.gz
rts/linker: Ensure that code isn't writable
For many years the linker would simply map all of its memory with
PROT_READ|PROT_WRITE|PROT_EXEC. However, operating systems have become
increasingly reluctant to accept this practice (e.g. #17353 and #12657), and
for good reason: writable code is ripe for exploitation.

Consequently, mmapForLinker now maps its memory with PROT_READ|PROT_WRITE.
After the linker has finished filling/relocating the mapping, it must call
mmapForLinkerMarkExecutable on the sections of the mapping which contain
executable code.

Moreover, to make all of this possible it was necessary to redesign the m32
allocator. First, we gave (in an earlier commit) each ObjectCode its own
m32_allocator. This was necessary because code loading and symbol
resolution/relocation are currently interleaved, so W^X cannot be enforced
when symbols from different objects reside in the same page.

We then redesigned the m32 allocator to take advantage of the fact that all
of the pages allocated with the allocator die at the same time (namely, when
the owning ObjectCode is unloaded). This makes a number of things simpler
(e.g. no more page reference counting, and the freeing interface provided by
the allocator is simpler). See Note [M32 Allocator] for details.
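As an aside, the W^X flow described above can be sketched with plain POSIX
mmap/mprotect. This is only an illustration of the discipline, not the RTS
implementation: the real code goes through mmapForLinker,
mmapForLinkerMarkExecutable and the m32 allocator, and the helper name below
is hypothetical.

    #include <string.h>
    #include <sys/mman.h>

    /* Map a region read+write (never executable while writable), copy the
     * code in, then flip the protection to read+execute once filling and
     * relocation are done. */
    static void *load_code_wx(const void *code, size_t len)
    {
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) return NULL;

        memcpy(p, code, len);            /* fill/relocate while writable */

        if (mprotect(p, len, PROT_READ | PROT_EXEC) != 0) {
            munmap(p, len);
            return NULL;
        }
        return p;                        /* executable, no longer writable */
    }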
Diffstat (limited to 'rts/linker/Elf.c')
-rw-r--r--  rts/linker/Elf.c  |  28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/rts/linker/Elf.c b/rts/linker/Elf.c
index 313666197b..3e19d3a2db 100644
--- a/rts/linker/Elf.c
+++ b/rts/linker/Elf.c
@@ -778,7 +778,9 @@ ocGetNames_ELF ( ObjectCode* oc )
// (i.e. we cannot map the sections separately), or if the section
// size is small.
else if (!oc->imageMapped || size < getPageSize() / 3) {
- start = m32_alloc(oc->m32, size, 8);
+ bool executable = kind == SECTIONKIND_CODE_OR_RODATA;
+ m32_allocator *allocator = executable ? oc->rx_m32 : oc->rw_m32;
+ start = m32_alloc(allocator, size, 8);
if (start == NULL) goto fail;
memcpy(start, oc->image + offset, size);
alloc = SECTION_M32;
@@ -1769,6 +1771,28 @@ do_Elf_Rela_relocations ( ObjectCode* oc, char* ehdrC,
#endif /* !aarch64_HOST_ARCH */
+static bool
+ocMprotect_Elf( ObjectCode *oc )
+{
+ for(int i=0; i < oc->n_sections; i++) {
+ Section *section = &oc->sections[i];
+ if(section->size == 0) continue;
+ switch (section->kind) {
+ case SECTIONKIND_CODE_OR_RODATA:
+ if (section->alloc != SECTION_M32) {
+ // N.B. m32 handles protection of its allocations during
+ // flushing.
+ mmapForLinkerMarkExecutable(section->mapped_start, section->mapped_size);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ return true;
+}
+
int
ocResolve_ELF ( ObjectCode* oc )
{
@@ -1855,7 +1879,7 @@ ocResolve_ELF ( ObjectCode* oc )
ocFlushInstructionCache( oc );
#endif
- return 1;
+ return ocMprotect_Elf(oc);
}
int ocRunInit_ELF( ObjectCode *oc )