Diffstat (limited to 'elf/dl-map-segments.h')
 elf/dl-map-segments.h | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 60 insertions(+), 12 deletions(-)
diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index 024175b2d5..ce51585ce4 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -18,15 +18,18 @@
    <https://www.gnu.org/licenses/>.  */
 
 #include <dl-load.h>
+#ifdef __CHERI_PURE_CAPABILITY__
+# include <cheri_perms.h>
+#endif
 
 /* Map a segment and align it properly.  */
 
-static __always_inline ElfW(Addr)
+static __always_inline elfptr_t
 _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
                  const size_t maplength, int fd)
 {
   if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
-    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
+    return (elfptr_t) __mmap ((void *) mappref, maplength, c->prot,
                               MAP_COPY|MAP_FILE, fd, c->mapoff);
 
   /* If the segment alignment > the page size, allocate enough space to
@@ -34,15 +37,15 @@ _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
   ElfW(Addr) maplen = (maplength >= c->mapalign
                        ? (maplength + c->mapalign)
                        : (2 * c->mapalign));
-  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
+  elfptr_t map_start = (elfptr_t) __mmap ((void *) mappref, maplen,
                                            PROT_NONE,
                                            MAP_ANONYMOUS|MAP_PRIVATE,
                                            -1, 0);
   if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
     return map_start;
 
-  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
-  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
+  elfptr_t map_start_aligned = ALIGN_UP (map_start, c->mapalign);
+  map_start_aligned = (elfptr_t) __mmap ((void *) map_start_aligned,
                                          maplength, c->prot,
                                          MAP_COPY|MAP_FILE|MAP_FIXED,
                                          fd, c->mapoff);
@@ -54,7 +57,7 @@ _dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
       ElfW(Addr) delta = map_start_aligned - map_start;
       if (delta)
         __munmap ((void *) map_start, delta);
-      ElfW(Addr) map_end = map_start_aligned + maplength;
+      elfptr_t map_end = map_start + (map_start_aligned - map_start) + maplength;
       map_end = ALIGN_UP (map_end, GLRO(dl_pagesize));
       delta = map_start + maplen - map_end;
       if (delta)
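
(The rewritten map_end computation above is numerically identical to map_start_aligned + maplength, but on CHERI it matters that the value is derived from map_start, whose capability covers the whole reservation, so the later munmap of the trailing pages stays within bounds.)  The over-reserve-and-trim technique itself is independent of the loader.  A minimal standalone sketch, using the public mmap/munmap rather than glibc's internal __mmap/__munmap, with a hypothetical map_aligned helper name:

#include <stdint.h>
#include <sys/mman.h>

/* Hypothetical helper: reserve enough PROT_NONE space that an
   ALIGN-aligned block of LEN bytes must fit, then trim the unused
   head and tail.  ALIGN and LEN are multiples of the page size.  */
static void *
map_aligned (size_t len, size_t align, size_t pagesize)
{
  size_t maplen = len >= align ? len + align : 2 * align;
  uintptr_t start = (uintptr_t) mmap (NULL, maplen, PROT_NONE,
                                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if ((void *) start == MAP_FAILED)
    return MAP_FAILED;
  uintptr_t aligned = (start + align - 1) & ~(align - 1);
  if (aligned > start)                      /* Trim the head.  */
    munmap ((void *) start, aligned - start);
  uintptr_t end = (aligned + len + pagesize - 1) & ~(pagesize - 1);
  if (start + maplen > end)                 /* Trim the tail.  */
    munmap ((void *) end, start + maplen - end);
  /* The caller can now map the real contents over [aligned, end)
     with MAP_FIXED, as _dl_map_segment does with the file pages.  */
  return (void *) aligned;
}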
@@ -79,6 +82,10 @@ _dl_map_segments (struct link_map *l, int fd,
                   struct link_map *loader)
 {
   const struct loadcmd *c = loadcmds;
+#ifdef __CHERI_PURE_CAPABILITY__
+  ElfW(Addr) rw_start = -1;
+  ElfW(Addr) rw_end = 0;
+#endif
 
   if (__glibc_likely (type == ET_DYN))
     {
@@ -116,7 +123,7 @@ _dl_map_segments (struct link_map *l, int fd,
                                c->mapend))
             return N_("ELF load command address/offset not page-aligned");
           if (__glibc_unlikely
-              (__mprotect ((caddr_t) (l->l_addr + c->mapend),
+              (__mprotect ((caddr_t) dl_rx_ptr (l, c->mapend),
                            loadcmds[nloadcmds - 1].mapstart - c->mapend,
                            PROT_NONE) < 0))
             return DL_MAP_SEGMENTS_ERROR_MPROTECT;
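
This mprotect implements the has_holes case: the gap between the lowest and highest load segments stays reserved but becomes inaccessible.  The same effect can be reproduced in isolation; a small self-contained sketch (not loader code):

#include <sys/mman.h>
#include <unistd.h>

int
main (void)
{
  size_t ps = (size_t) sysconf (_SC_PAGESIZE);
  /* Reserve three consecutive pages, standing in for the span between
     the first and last PT_LOAD segments of a shared object.  */
  char *base = mmap (NULL, 3 * ps, PROT_READ | PROT_WRITE,
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (base == MAP_FAILED)
    return 1;
  /* Make the "hole" page inaccessible; touching it now faults, just
     as the unmapped gaps between segments would.  */
  return mprotect (base + ps, ps, PROT_NONE) != 0;
}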
@@ -126,6 +133,22 @@ _dl_map_segments (struct link_map *l, int fd,
 
       goto postmap;
     }
+#ifdef __CHERI_PURE_CAPABILITY__
+  else
+    {
+      /* Need a single capability to cover all load segments.  */
+      void *p = __mmap ((void *) c->mapstart, maplength, c->prot,
+                        MAP_FIXED|MAP_COPY|MAP_FILE,
+                        fd, c->mapoff);
+      if (p == MAP_FAILED)
+        return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+      l->l_map_start = (elfptr_t) p;
+      l->l_map_end = l->l_map_start + maplength;
+      l->l_contiguous = !has_holes;
+
+      goto postmap;
+    }
+#endif
 
   /* Remember which part of the address space this object uses.  */
   l->l_map_start = c->mapstart + l->l_addr;
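
dl_rx_ptr is used throughout this patch but defined elsewhere; its definition is not part of this diff.  Judging only from the call sites here, a plausible shape is the following sketch, which is an assumption rather than the patch's actual code: on CHERI it rederives the pointer from the object's mapping capability, so provenance and bounds come from l_map_start, and on other targets it reduces to the historical l_addr + vaddr arithmetic.

/* Sketch only: the real definition lives elsewhere in the patch.  */
static __always_inline elfptr_t
dl_rx_ptr (struct link_map *l, ElfW(Addr) vaddr)
{
#ifdef __CHERI_PURE_CAPABILITY__
  /* Derive from the per-object capability rather than from raw
     integer arithmetic, so the result is a valid capability.  */
  return __builtin_cheri_address_set (l->l_map_start, l->l_addr + vaddr);
#else
  return l->l_addr + vaddr;
#endif
}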
@@ -134,10 +157,10 @@ _dl_map_segments (struct link_map *l, int fd,
 
   while (c < &loadcmds[nloadcmds])
     {
-      if (c->mapend > c->mapstart
+      if (c->dataend > c->mapstart
           /* Map the segment contents from the file.  */
-          && (__mmap ((void *) (l->l_addr + c->mapstart),
-                      c->mapend - c->mapstart, c->prot,
+          && (__mmap ((void *) dl_rx_ptr (l, c->mapstart),
+                      c->dataend - c->mapstart, c->prot,
                       MAP_FIXED|MAP_COPY|MAP_FILE,
                       fd, c->mapoff)
              == MAP_FAILED))
@@ -146,13 +169,28 @@ _dl_map_segments (struct link_map *l, int fd,
     postmap:
       _dl_postprocess_loadcmd (l, header, c);
 
+#ifdef __CHERI_PURE_CAPABILITY__
+      if (c->prot & PROT_WRITE)
+        {
+          if (l->l_rw_count >= DL_MAX_RW_COUNT)
+            return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT; /* TODO: right error code.  */
+          if (c->mapstart < rw_start)
+            rw_start = c->mapstart;
+          if (c->allocend > rw_end)
+            rw_end = c->allocend;
+          l->l_rw_range[l->l_rw_count].start = l->l_addr + c->mapstart;
+          l->l_rw_range[l->l_rw_count].end = l->l_addr + c->allocend;
+          l->l_rw_count++;
+        }
+#endif
+
       if (c->allocend > c->dataend)
         {
           /* Extra zero pages should appear at the end of this segment,
              after the data mapped from the file.  */
-          ElfW(Addr) zero, zeroend, zeropage;
+          elfptr_t zero, zeroend, zeropage;
 
-          zero = l->l_addr + c->dataend;
+          zero = dl_rx_ptr (l, c->dataend);
           zeroend = l->l_addr + c->allocend;
           zeropage = ((zero + GLRO(dl_pagesize) - 1)
                       & ~(GLRO(dl_pagesize) - 1));
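
The code consuming zero, zeroend and zeropage is truncated by this hunk, but the technique is the classic BSS zero-fill: clear the file-backed tail of the last data page in place, then map fresh anonymous pages for the remainder.  A hedged standalone sketch of that logic (hypothetical zero_fill helper; the real loader additionally makes the page writable first when the segment lacks PROT_WRITE):

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

/* Hypothetical helper: given the end of the file-backed data (ZERO)
   and the end of the in-memory image (ZEROEND), zero-fill the gap
   the way the loader handles .bss.  PAGESIZE is a power of two.  */
static int
zero_fill (uintptr_t zero, uintptr_t zeroend, size_t pagesize, int prot)
{
  uintptr_t zeropage = (zero + pagesize - 1) & ~(pagesize - 1);
  if (zeroend < zeropage)
    zeropage = zeroend;
  /* Zero the tail of the last file-backed page in place.  */
  if (zeropage > zero)
    memset ((void *) zero, '\0', zeropage - zero);
  /* Whole pages beyond it become fresh anonymous zero pages.  */
  if (zeroend > zeropage
      && mmap ((void *) zeropage, zeroend - zeropage, prot,
               MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0) == MAP_FAILED)
    return -1;
  return 0;
}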
@@ -194,6 +232,16 @@ _dl_map_segments (struct link_map *l, int fd,
       ++c;
     }
 
+#ifdef __CHERI_PURE_CAPABILITY__
+  if (l->l_rw_count > 0)
+    {
+      l->l_rw_start = __builtin_cheri_address_set (l->l_map_start, l->l_addr + rw_start);
+      l->l_rw_start = __builtin_cheri_bounds_set (l->l_rw_start, rw_end - rw_start);
+      l->l_rw_start = __builtin_cheri_perms_and (l->l_rw_start, CAP_PERM_MASK_RW);
+    }
+  l->l_map_start = __builtin_cheri_perms_and (l->l_map_start, CAP_PERM_MASK_RX);
+#endif
+
   /* Notify ELF_PREFERRED_ADDRESS that we have to load this one
      fixed.  */
   ELF_FIXED_ADDRESS (loader, c->mapstart);
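
The closing #ifdef block is where the capability model pays off: l_rw_start becomes a single RW capability bounded to [rw_start, rw_end), and l_map_start is narrowed to an RX view of the whole mapping, so pointers derived from the RX capability cannot write and writes cannot stray outside the writable ranges.  The derive-bound-restrict pattern in isolation, as a sketch (CHERI-only; CAP_PERM_MASK_RW is the mask this patch takes from cheri_perms.h):

#ifdef __CHERI_PURE_CAPABILITY__
# include <stddef.h>
# include <cheri_perms.h>

/* Sketch: derive a write-capable view of LEN bytes at OFFSET from a
   broader mapping capability BASE, then restrict its permissions.
   Capability monotonicity guarantees the result cannot exceed BASE
   in either bounds or permissions.  */
static void *
derive_rw_view (void *base, ptrdiff_t offset, size_t len)
{
  void *rw = __builtin_cheri_address_set (base,
                                          __builtin_cheri_address_get (base)
                                          + offset);
  rw = __builtin_cheri_bounds_set (rw, len);
  return __builtin_cheri_perms_and (rw, CAP_PERM_MASK_RW);
}
#endif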