author    Stefan Fritsch <sf@apache.org>    2011-02-18 21:51:56 +0000
committer Stefan Fritsch <sf@apache.org>    2011-02-18 21:51:56 +0000
commit    1fdceddf1025132376760a89a5e5ebde319faa33 (patch)
tree      5f5950c7dd794570c0cf496997e4fdd33f6cfdb8 /memory
parent    7bfa95ed55377e3541213b3816cb43bdfb6c0beb (diff)
Add new configure option --enable-allocator-uses-mmap to use mmap
instead of malloc in apr_allocator_alloc(). This greatly reduces memory fragmentation with malloc implementations (e.g. glibc) that don't handle allocations of page-size multiples in an efficient way. It also makes apr_allocator_max_free_set() actually have some effect on such platforms.

The handling of page sizes other than 4k seems like a lot of trouble for a very small number of platforms, but there does not seem to be a reasonable way to check this at compile time.

git-svn-id: https://svn.apache.org/repos/asf/apr/apr/trunk@1072165 13f79535-47bb-0310-9956-ffa450edef68
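For context, the core idea is small: carve each allocator node out of whole pages with mmap(), so that releasing it with munmap() returns the memory straight to the kernel instead of leaving page-sized holes behind other live allocations in the malloc heap. A minimal sketch with illustrative names (not APR code):

#include <stddef.h>
#include <sys/mman.h>

/* Allocate `size` bytes (assumed to be a page-size multiple) directly
 * from the kernel rather than from the malloc heap. */
static void *alloc_pages(size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
    return (p == MAP_FAILED) ? NULL : p;
}

/* Unlike free() on a fragmented heap, this hands the pages back to the
 * OS immediately, which is what makes max_free settings effective. */
static void free_pages(void *p, size_t size)
{
    munmap(p, size);
}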
Diffstat (limited to 'memory')
-rw-r--r--  memory/unix/apr_pools.c | 57
1 file changed, 56 insertions(+), 1 deletion(-)
diff --git a/memory/unix/apr_pools.c b/memory/unix/apr_pools.c
index ca76db33e..96aff7a08 100644
--- a/memory/unix/apr_pools.c
+++ b/memory/unix/apr_pools.c
@@ -36,9 +36,12 @@
#endif
#if APR_HAVE_UNISTD_H
-#include <unistd.h> /* for getpid */
+#include <unistd.h> /* for getpid and sysconf */
#endif
+#if APR_ALLOCATOR_USES_MMAP
+#include <sys/mman.h>
+#endif
/*
* Magic numbers
@@ -47,8 +50,15 @@
#define MIN_ALLOC 8192
#define MAX_INDEX 20
+#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
+static unsigned int boundary_index;
+static unsigned int boundary_size;
+#define BOUNDARY_INDEX boundary_index
+#define BOUNDARY_SIZE boundary_size
+#else
#define BOUNDARY_INDEX 12
#define BOUNDARY_SIZE (1 << BOUNDARY_INDEX)
+#endif
/*
* Timing constants for killing subprocesses
@@ -131,7 +141,11 @@ APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator)
ref = &allocator->free[index];
while ((node = *ref) != NULL) {
*ref = node->next;
+#if APR_ALLOCATOR_USES_MMAP
+ munmap(node, (node->index+1) << BOUNDARY_INDEX);
+#else
free(node);
+#endif
}
}
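A note on the munmap() length above: elsewhere in apr_pools.c, allocator_alloc() rounds every node up to a multiple of BOUNDARY_SIZE and stores that multiple, minus one, in node->index. The expression (node->index+1) << BOUNDARY_INDEX simply reverses that bookkeeping. As a sketch:

/* Size stored at allocation:  index = (size >> BOUNDARY_INDEX) - 1
 * Size recovered at free:     size  = (index + 1) << BOUNDARY_INDEX */
static size_t node_size_in_bytes(unsigned int index)
{
    return (size_t)(index + 1) << BOUNDARY_INDEX;
}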
@@ -323,7 +337,12 @@ apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size)
/* If we haven't got a suitable node, malloc a new one
* and initialize it.
*/
+#if APR_ALLOCATOR_USES_MMAP
+ if ((node = mmap(NULL, size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED)
+#else
if ((node = malloc(size)) == NULL)
+#endif
return NULL;
node->next = NULL;
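One portability caveat worth noting as an aside (it is not part of this commit): some platforms spell the anonymous-mapping flag MAP_ANONYMOUS rather than MAP_ANON, so code taking this path often carries a conventional shim such as:

#ifndef MAP_ANON
#define MAP_ANON MAP_ANONYMOUS  /* older spelling vs. the Linux/BSD one */
#endif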
@@ -400,7 +419,11 @@ void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node)
while (freelist != NULL) {
node = freelist;
freelist = node->next;
+#if APR_ALLOCATOR_USES_MMAP
+ munmap(node, (node->index+1) << BOUNDARY_INDEX);
+#else
free(node);
+#endif
}
}
@@ -548,6 +571,14 @@ APR_DECLARE(apr_status_t) apr_pool_initialize(void)
if (apr_pools_initialized++)
return APR_SUCCESS;
+#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
+ boundary_size = sysconf(_SC_PAGESIZE);
+ boundary_index = 12;
+ while ( (1 << boundary_index) < boundary_size)
+ boundary_index++;
+ boundary_size = (1 << boundary_index);
+#endif
+
if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) {
apr_pools_initialized = 0;
return rv;
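The boundary loop in this hunk rounds the system page size up to the nearest power of two, with the old 4k default (1 << 12) as the floor. A standalone check of that logic (a hypothetical test harness, not part of the patch):

#include <assert.h>

/* Mirrors the loop above: boundary_size becomes the smallest power of
 * two that is >= the page size, never below 4096. */
static unsigned int round_boundary(unsigned int page_size)
{
    unsigned int index = 12;
    while ((1u << index) < page_size)
        index++;
    return 1u << index;
}

int main(void)
{
    assert(round_boundary(4096)  == 4096);   /* common x86 page size */
    assert(round_boundary(8192)  == 8192);   /* e.g. SPARC */
    assert(round_boundary(65536) == 65536);  /* 64k pages on some ARM/POWER */
    return 0;
}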
@@ -923,6 +954,7 @@ APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
if (!apr_pools_initialized)
return APR_ENOPOOL;
if ((pool_allocator = allocator) == NULL) {
+#if !APR_ALLOCATOR_USES_MMAP
if ((pool_allocator = malloc(MIN_ALLOC)) == NULL) {
if (abort_fn)
abort_fn(APR_ENOMEM);
@@ -936,6 +968,21 @@ APR_DECLARE(apr_status_t) apr_pool_create_unmanaged_ex(apr_pool_t **newpool,
node->index = 1;
node->first_avail = (char *)node + APR_MEMNODE_T_SIZE;
node->endp = (char *)pool_allocator + MIN_ALLOC;
+#else
+ if (apr_allocator_create(&pool_allocator) != APR_SUCCESS) {
+ if (abort_fn)
+ abort_fn(APR_ENOMEM);
+
+ return APR_ENOMEM;
+ }
+ if ((node = allocator_alloc(pool_allocator,
+ MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
+ if (abort_fn)
+ abort_fn(APR_ENOMEM);
+
+ return APR_ENOMEM;
+ }
+#endif
}
else if ((node = allocator_alloc(pool_allocator,
MIN_ALLOC - APR_MEMNODE_T_SIZE)) == NULL) {
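Why the extra branch in this hunk: an mmap-backed node must eventually be released with munmap(), so the bootstrap can no longer hand the pool a bare malloc()'d block as its first node; instead it builds a real allocator and draws the first node from it. Callers see no difference. A usage sketch against the public API (which the diff context above confirms):

#include "apr_pools.h"

/* Creating an unmanaged pool works the same either way; only the
 * internal bootstrap differs when APR_ALLOCATOR_USES_MMAP is set. */
apr_pool_t *make_unmanaged_pool(void)
{
    apr_pool_t *p = NULL;
    if (apr_pool_create_unmanaged_ex(&p, NULL, NULL) != APR_SUCCESS)
        return NULL;
    return p;
}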
@@ -1331,6 +1378,14 @@ APR_DECLARE(apr_status_t) apr_pool_initialize(void)
if (apr_pools_initialized++)
return APR_SUCCESS;
+#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE)
+ boundary_size = sysconf(_SC_PAGESIZE);
+ boundary_index = 12;
+ while ( (1 << boundary_index) < boundary_size)
+ boundary_index++;
+ boundary_size = (1 << boundary_index);
+#endif
+
/* Since the debug code works a bit differently than the
* regular pools code, we ask for a lock here. The regular
* pools code has got this lock embedded in the global