author     Stefan Fritsch <sf@apache.org>    2014-05-09 20:29:25 +0000
committer  Stefan Fritsch <sf@apache.org>    2014-05-09 20:29:25 +0000
commit     ef852b52037ce2b5b5262ca3414783753be9cc7f (patch)
tree       1a4536e923b882983b727fb6bd3079f613118263 /memory
parent     9aaf97b879e299996f5f5c234ca4ce763c35f715 (diff)
Add option to use guard pages
Add a new --enable-allocator-guard-pages configure option which works like --enable-allocator-uses-mmap, but also adds inaccessible guard pages before and after each memnode. This results in higher resource usage, but makes it possible to find, and protect against, certain buffer overflow/overread bugs.

git-svn-id: https://svn.apache.org/repos/asf/apr/apr/trunk@1593615 13f79535-47bb-0310-9956-ffa450edef68
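For illustration, here is a minimal standalone sketch of the guard-page technique the patch applies in allocator_alloc and the munmap paths: reserve the whole span PROT_NONE, then open up only the interior with mprotect. The helper names guarded_alloc and guarded_free are hypothetical, not APR APIs, and the sketch assumes a POSIX system that provides MAP_ANON.

#include <sys/mman.h>
#include <unistd.h>
#include <stddef.h>

/* Hypothetical sketch of the patch's technique, not an APR API. */
static void *guarded_alloc(size_t size)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);

    /* Round up to whole pages so the trailing guard page sits directly
     * after the usable region. APR gets this for free: node sizes are
     * multiples of the page-sized boundary. */
    size = (size + page - 1) & ~(page - 1);

    /* Reserve payload plus one guard page on each side, all PROT_NONE. */
    char *base = mmap(NULL, size + 2 * page, PROT_NONE,
                      MAP_PRIVATE|MAP_ANON, -1, 0);
    if (base == MAP_FAILED)
        return NULL;

    /* Make only the interior usable; the guard pages stay PROT_NONE, so
     * any access just before or just past the region faults immediately. */
    if (mprotect(base + page, size, PROT_READ|PROT_WRITE) != 0) {
        munmap(base, size + 2 * page);
        return NULL;
    }
    return base + page;
}

static void guarded_free(void *p, size_t size)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size = (size + page - 1) & ~(page - 1);

    /* Unmap the full span, guard pages included, mirroring the
     * "(char *)node - GUARDPAGE_SIZE" adjustment in the patch below. */
    munmap((char *)p - page, size + 2 * page);
}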
Diffstat (limited to 'memory')
-rw-r--r--  memory/unix/apr_pools.c | 32 +++++++++++++++++++++++++++++---
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/memory/unix/apr_pools.c b/memory/unix/apr_pools.c
index f12f27e9e..fdfe4af6f 100644
--- a/memory/unix/apr_pools.c
+++ b/memory/unix/apr_pools.c
@@ -40,6 +40,10 @@
#include <unistd.h> /* for getpid and sysconf */
#endif
+#if APR_ALLOCATOR_GUARD_PAGES && !APR_ALLOCATOR_USES_MMAP
+#define APR_ALLOCATOR_USES_MMAP 1
+#endif
+
#if APR_ALLOCATOR_USES_MMAP
#include <sys/mman.h>
#endif
@@ -76,6 +80,16 @@ static unsigned int boundary_size;
#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
#endif
+#if APR_ALLOCATOR_GUARD_PAGES
+#if defined(_SC_PAGESIZE)
+#define GUARDPAGE_SIZE boundary_size
+#else
+#error Cannot determine page size
+#endif /* _SC_PAGESIZE */
+#else
+#define GUARDPAGE_SIZE 0
+#endif /* APR_ALLOCATOR_GUARD_PAGES */
+
/*
* Timing constants for killing subprocesses
* There is a total 3-second delay between sending a SIGINT
@@ -158,7 +172,8 @@ APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator)
while ((node = *ref) != NULL) {
*ref = node->next;
#if APR_ALLOCATOR_USES_MMAP
- munmap(node, (node->index+1) << BOUNDARY_INDEX);
+ munmap((char *)node - GUARDPAGE_SIZE,
+ 2 * GUARDPAGE_SIZE + ((node->index+1) << BOUNDARY_INDEX));
#else
free(node);
#endif
@@ -347,7 +362,10 @@ apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
/* If we haven't got a suitable node, malloc a new one
* and initialize it.
*/
-#if APR_ALLOCATOR_USES_MMAP
+#if APR_ALLOCATOR_GUARD_PAGES
+ if ((node = mmap(NULL, size + 2 * GUARDPAGE_SIZE, PROT_NONE,
+ MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED)
+#elif APR_ALLOCATOR_USES_MMAP
if ((node = mmap(NULL, size, PROT_READ|PROT_WRITE,
MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED)
#else
@@ -355,6 +373,13 @@ apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
#endif
return NULL;
+#if APR_ALLOCATOR_GUARD_PAGES
+ node = (apr_memnode_t *)((char *)node + GUARDPAGE_SIZE);
+ if (mprotect(node, size, PROT_READ|PROT_WRITE) != 0) {
+ munmap((char *)node - GUARDPAGE_SIZE, size + 2 * GUARDPAGE_SIZE);
+ return NULL;
+ }
+#endif
node->index = index;
node->endp = (char *)node + size;
@@ -437,7 +462,8 @@ void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
node = freelist;
freelist = node->next;
#if APR_ALLOCATOR_USES_MMAP
- munmap(node, (node->index+1) << BOUNDARY_INDEX);
+ munmap((char *)node - GUARDPAGE_SIZE,
+ 2 * GUARDPAGE_SIZE + ((node->index+1) << BOUNDARY_INDEX));
#else
free(node);
#endif
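As a usage sketch building on the hypothetical guarded_alloc/guarded_free helpers above, an out-of-bounds access now faults at the guard page instead of silently corrupting an adjacent node:

char *p = guarded_alloc(4096);
if (p) {
    p[0]    = 'a';   /* in bounds: fine */
    p[4095] = 'z';   /* last usable byte: fine */
    p[4096] = '!';   /* overrun: lands on the PROT_NONE guard page, SIGSEGV */
    guarded_free(p, 4096);
}

Note the design choice visible in the first hunk: enabling APR_ALLOCATOR_GUARD_PAGES forces APR_ALLOCATOR_USES_MMAP on, since PROT_NONE guards only work for mmap'd nodes. Because GUARDPAGE_SIZE is defined as 0 when guard pages are disabled, the adjusted munmap calls serve both configurations with a single code path.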