Diffstat (limited to 'ext/opcache/shared_alloc_mmap.c')
-rw-r--r--  ext/opcache/shared_alloc_mmap.c  |  62
1 file changed, 45 insertions(+), 17 deletions(-)
diff --git a/ext/opcache/shared_alloc_mmap.c b/ext/opcache/shared_alloc_mmap.c
index dc02d038f5..8f900c1590 100644
--- a/ext/opcache/shared_alloc_mmap.c
+++ b/ext/opcache/shared_alloc_mmap.c
@@ -39,17 +39,10 @@
static int create_segments(size_t requested_size, zend_shared_segment ***shared_segments_p, int *shared_segments_count, char **error_in)
{
zend_shared_segment *shared_segment;
-
- *shared_segments_count = 1;
- *shared_segments_p = (zend_shared_segment **) calloc(1, sizeof(zend_shared_segment) + sizeof(void *));
- if (!*shared_segments_p) {
- *error_in = "calloc";
- return ALLOC_FAILURE;
- }
- shared_segment = (zend_shared_segment *)((char *)(*shared_segments_p) + sizeof(void *));
- (*shared_segments_p)[0] = shared_segment;
-
+ void *p;
#ifdef MAP_HUGETLB
+ size_t huge_page_size = 2 * 1024 * 1024;
+
/* Try to allocate huge pages first to reduce dTLB misses.
* OSes have to be configured properly
* on Linux
@@ -60,21 +53,56 @@ static int create_segments(size_t requested_size, zend_shared_segment ***shared_
* sysctl vm.pmap.pg_ps_enabled entry
* (boot time config only, but enabled by default on most arches).
*/
- shared_segment->p = mmap(0, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB, -1, 0);
- if (shared_segment->p != MAP_FAILED) {
- shared_segment->pos = 0;
- shared_segment->size = requested_size;
- return ALLOC_SUCCESS;
+ if (requested_size >= huge_page_size && requested_size % huge_page_size == 0) {
+# if defined(__x86_64__) && defined(MAP_32BIT)
+ /* to get HUGE PAGES in the low 32-bit address space we have to reserve
+ address space and then remap it using MAP_HUGETLB */
+ p = mmap(NULL, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, -1, 0);
+ if (p != MAP_FAILED) {
+ munmap(p, requested_size);
+ p = (void*)(ZEND_MM_ALIGNED_SIZE_EX((ptrdiff_t)p, huge_page_size));
+ p = mmap(p, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT|MAP_HUGETLB|MAP_FIXED, -1, 0);
+ if (p != MAP_FAILED) {
+ goto success;
+ } else {
+ p = mmap(NULL, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, -1, 0);
+ if (p != MAP_FAILED) {
+ goto success;
+ }
+ }
+ }
+# endif
+ p = mmap(0, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_HUGETLB, -1, 0);
+ if (p != MAP_FAILED) {
+ goto success;
+ }
+ }
+#elif defined(PREFER_MAP_32BIT) && defined(__x86_64__) && defined(MAP_32BIT)
+ p = mmap(NULL, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS|MAP_32BIT, -1, 0);
+ if (p != MAP_FAILED) {
+ goto success;
}
#endif
- shared_segment->p = mmap(0, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
- if (shared_segment->p == MAP_FAILED) {
+ p = mmap(0, requested_size, PROT_READ | PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0);
+ if (p == MAP_FAILED) {
*error_in = "mmap";
return ALLOC_FAILURE;
}
+success: ZEND_ATTRIBUTE_UNUSED;
+ *shared_segments_count = 1;
+ *shared_segments_p = (zend_shared_segment **) calloc(1, sizeof(zend_shared_segment) + sizeof(void *));
+ if (!*shared_segments_p) {
+ munmap(p, requested_size);
+ *error_in = "calloc";
+ return ALLOC_FAILURE;
+ }
+ shared_segment = (zend_shared_segment *)((char *)(*shared_segments_p) + sizeof(void *));
+ (*shared_segments_p)[0] = shared_segment;
+
+ shared_segment->p = p;
shared_segment->pos = 0;
shared_segment->size = requested_size;
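
Below is a minimal, self-contained sketch of the allocation strategy this patch introduces, for experimenting outside of php-src. It shows the same reserve-then-remap trick: grab a low 32-bit address range with MAP_32BIT, release it, round the address up to a 2 MB huge-page boundary, and remap it with MAP_HUGETLB|MAP_FIXED, falling back to regular pages when any step fails. The names align_up and alloc_shared and the main driver are illustrative only, not php-src API; align_up stands in for ZEND_MM_ALIGNED_SIZE_EX, and the sketch assumes a Linux/x86-64 host where 2 MB huge pages have been reserved (e.g. via vm.nr_hugepages).

#define _GNU_SOURCE         /* for MAP_32BIT / MAP_HUGETLB on glibc */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

/* Round addr up to the next multiple of align (a power of two);
   plays the role of ZEND_MM_ALIGNED_SIZE_EX in the patch. */
static void *align_up(void *addr, size_t align)
{
	return (void *)(((uintptr_t)addr + align - 1) & ~(uintptr_t)(align - 1));
}

static void *alloc_shared(size_t size)
{
	const size_t huge_page_size = 2 * 1024 * 1024; /* assumed 2 MB huge pages */
	void *p;

#if defined(MAP_HUGETLB) && defined(__x86_64__) && defined(MAP_32BIT)
	if (size >= huge_page_size && size % huge_page_size == 0) {
		/* Step 1: reserve a range in the low 2 GB with ordinary pages. */
		p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		         MAP_SHARED | MAP_ANONYMOUS | MAP_32BIT, -1, 0);
		if (p != MAP_FAILED) {
			/* Step 2: free the reservation, align the address to a
			   huge-page boundary, and remap it with huge pages. As in
			   the patch, this trusts that the aligned range is still
			   free when the MAP_FIXED remap happens. */
			munmap(p, size);
			p = align_up(p, huge_page_size);
			p = mmap(p, size, PROT_READ | PROT_WRITE,
			         MAP_SHARED | MAP_ANONYMOUS | MAP_32BIT | MAP_HUGETLB | MAP_FIXED,
			         -1, 0);
			if (p != MAP_FAILED) {
				return p;
			}
		}
	}
#endif
	/* Fallback: plain anonymous shared mapping with regular pages. */
	p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	return p == MAP_FAILED ? NULL : p;
}

int main(void)
{
	void *p = alloc_shared(4 * 1024 * 1024);
	if (p != NULL) {
		memset(p, 0, 4 * 1024 * 1024); /* touch the pages */
		printf("mapped at %p\n", p);
	}
	return 0;
}

As in the patch, the huge-page path is opportunistic: if the MAP_HUGETLB remap fails (no huge pages reserved, or the low address space is fragmented), the code degrades to regular 4 KB pages instead of reporting an error; only failure of the final plain mmap is fatal.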