author    Rasmus Lerdorf <rasmus@lerdorf.com>    2019-10-23 14:34:12 -0700
committer Rasmus Lerdorf <rasmus@lerdorf.com>    2019-10-23 14:34:12 -0700
commit    af57b6330b3cd25f1a4d7dfcebb92181a6f7ff1b (patch)
tree      06eb19b184ae93fa030dfe8abe2ddd15ea88e595 /Zend/zend_alloc.c
parent    5870efbcf5235bb7328fe7cea3b8e2b92fb9fc0d (diff)
download  php-git-af57b6330b3cd25f1a4d7dfcebb92181a6f7ff1b.tar.gz
Reverting push to wrong repo
Diffstat (limited to 'Zend/zend_alloc.c')
-rw-r--r--  Zend/zend_alloc.c  |  33
1 file changed, 6 insertions(+), 27 deletions(-)
diff --git a/Zend/zend_alloc.c b/Zend/zend_alloc.c
index a1d3ad680f..21ccf85049 100644
--- a/Zend/zend_alloc.c
+++ b/Zend/zend_alloc.c
@@ -195,11 +195,6 @@ typedef struct _zend_mm_free_slot zend_mm_free_slot;
typedef struct _zend_mm_chunk zend_mm_chunk;
typedef struct _zend_mm_huge_list zend_mm_huge_list;
-/*
- * 0 means disabled
- * 1 means huge pages
- * 2 means transparent huge pages
- */
int zend_mm_use_huge_pages = 0;
/*
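The comment deleted above documented a tri-state: 0 disables huge pages, 1 requests explicit huge pages from mmap(), and 2 asks the kernel to back a normal mapping with transparent huge pages. A minimal sketch of how the two non-zero modes differ on Linux; request_chunk is a hypothetical helper for illustration, not code from zend_alloc.c:

#include <stddef.h>
#include <sys/mman.h>

static void *request_chunk(size_t size, int huge_mode)
{
#ifdef MAP_HUGETLB
	if (huge_mode == 1) {
		/* Mode 1: explicit huge pages; fails unless the kernel has
		 * some reserved, hence the fall-through below. */
		void *hp = mmap(NULL, size, PROT_READ | PROT_WRITE,
		                MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
		if (hp != MAP_FAILED) {
			return hp;
		}
	}
#endif
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	               MAP_PRIVATE | MAP_ANON, -1, 0);
	if (p == MAP_FAILED) {
		return NULL;
	}
#ifdef MADV_HUGEPAGE
	if (huge_mode == 2) {
		/* Mode 2: regular mapping plus a transparent-huge-page hint. */
		madvise(p, size, MADV_HUGEPAGE);
	}
#endif
	return p;
}

The hunks below show the revert collapsing this dispatch back into a single boolean that drives both sites.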
@@ -234,13 +229,6 @@ int zend_mm_use_huge_pages = 0;
* 2 for 5-8, 3 for 9-16 etc) see zend_alloc_sizes.h
*/
-/*
- * For environments where mmap is expensive it can be
- * worthwhile to avoid mmap/munmap churn by raising
- * the minimum number of chunks in emalloc
- */
-int zend_mm_min_chunks = 0;
-
struct _zend_mm_heap {
#if ZEND_MM_CUSTOM
int use_custom_heap;
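The second deleted block documented zend_mm_min_chunks, a floor on the emalloc chunk cache for platforms where mmap()/munmap() round trips are expensive. The idea in reduced form, assuming a hypothetical chunk_cache structure and cache_trim helper rather than the engine's real bookkeeping:

#include <sys/mman.h>

#define CHUNK_SIZE (2 * 1024 * 1024)   /* ZEND_MM_CHUNK_SIZE */

struct chunk_cache {
	void *slots[64];   /* mmapped chunks kept warm for reuse */
	int   count;
	int   min_chunks;  /* the floor the deleted variable configured */
};

static void cache_trim(struct chunk_cache *c, int target)
{
	/* Free cached chunks down to `target`, but never below the floor:
	 * keeping a few chunks mapped avoids an mmap()/munmap() round
	 * trip on every request when mmap is expensive. */
	if (target < c->min_chunks) {
		target = c->min_chunks;
	}
	while (c->count > target) {
		munmap(c->slots[--c->count], CHUNK_SIZE);
	}
}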
@@ -474,7 +462,7 @@ static void *zend_mm_mmap(size_t size)
void *ptr;
#ifdef MAP_HUGETLB
- if (zend_mm_use_huge_pages == 1 && size == ZEND_MM_CHUNK_SIZE) {
+ if (zend_mm_use_huge_pages && size == ZEND_MM_CHUNK_SIZE) {
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
if (ptr != MAP_FAILED) {
return ptr;
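This hunk widens the MAP_HUGETLB path from mode 1 to any non-zero setting. MAP_HUGETLB succeeds only when the kernel actually has huge pages reserved (e.g. via sysctl vm.nr_hugepages), which is why the code falls through to a regular mapping on failure. A standalone probe showing the same pattern; illustrative, not engine code:

#include <stdio.h>
#include <sys/mman.h>

#define CHUNK (2 * 1024 * 1024)  /* matches ZEND_MM_CHUNK_SIZE (2 MB) */

int main(void)
{
	void *p = MAP_FAILED;
#ifdef MAP_HUGETLB
	p = mmap(NULL, CHUNK, PROT_READ | PROT_WRITE,
	         MAP_PRIVATE | MAP_ANON | MAP_HUGETLB, -1, 0);
#endif
	if (p == MAP_FAILED) {
		/* Lands here when no huge pages are reserved; fall back to
		 * regular pages just as zend_mm_mmap() does. */
		p = mmap(NULL, CHUNK, PROT_READ | PROT_WRITE,
		         MAP_PRIVATE | MAP_ANON, -1, 0);
	}
	printf("chunk mapped at %p (%s)\n", p,
	       p == MAP_FAILED ? "failed" : "ok");
	return p == MAP_FAILED;
}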
@@ -681,7 +669,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
return NULL;
} else if (ZEND_MM_ALIGNED_OFFSET(ptr, alignment) == 0) {
#ifdef MADV_HUGEPAGE
- if (zend_mm_use_huge_pages == 2) {
+ if (zend_mm_use_huge_pages) {
madvise(ptr, size, MADV_HUGEPAGE);
}
#endif
@@ -714,7 +702,7 @@ static void *zend_mm_chunk_alloc_int(size_t size, size_t alignment)
zend_mm_munmap((char*)ptr + size, alignment - REAL_PAGE_SIZE);
}
# ifdef MADV_HUGEPAGE
- if (zend_mm_use_huge_pages == 2) {
+ if (zend_mm_use_huge_pages) {
madvise(ptr, size, MADV_HUGEPAGE);
}
# endif
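This second MADV_HUGEPAGE site sits on the fallback path that over-maps and then trims to obtain an aligned chunk; the hint is only worth giving once the range is suitably aligned. A simplified sketch of that alignment trick, using a hypothetical mmap_aligned helper (the real code reuses parts of the over-mapping and works in REAL_PAGE_SIZE units; this is not a drop-in):

#include <stdint.h>
#include <sys/mman.h>

static void *mmap_aligned(size_t size, size_t alignment)
{
	/* Over-map by `alignment` bytes so an aligned start is guaranteed
	 * somewhere in the range, then unmap the misaligned head and the
	 * unused tail. */
	char *raw = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
	                 MAP_PRIVATE | MAP_ANON, -1, 0);
	if (raw == MAP_FAILED) {
		return NULL;
	}
	size_t head = (alignment - ((uintptr_t)raw % alignment)) % alignment;
	if (head) {
		munmap(raw, head);                        /* misaligned head */
	}
	munmap(raw + head + size, alignment - head);  /* unused tail */
#ifdef MADV_HUGEPAGE
	/* Only now, on an aligned range, is the THP hint effective. */
	madvise(raw + head, size, MADV_HUGEPAGE);
#endif
	return raw + head;
}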
@@ -2282,7 +2270,7 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
zend_mm_chunk_free(heap, heap->main_chunk, ZEND_MM_CHUNK_SIZE);
} else {
/* free some cached chunks to keep average count */
- heap->avg_chunks_count = MAX((heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0, zend_mm_min_chunks);
+ heap->avg_chunks_count = (heap->avg_chunks_count + (double)heap->peak_chunks_count) / 2.0;
while ((double)heap->cached_chunks_count + 0.9 > heap->avg_chunks_count &&
heap->cached_chunks) {
p = heap->cached_chunks;
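The restored expression is an exponential moving average: each shutdown halves the gap between the running average and that request's peak chunk count, and the cache is then trimmed toward the average. The reverted variant additionally clamped the result with MAX(..., zend_mm_min_chunks). A worked example of the decay:

#include <stdio.h>

int main(void)
{
	double avg = 0.0;   /* heap->avg_chunks_count */
	double peak = 8.0;  /* heap->peak_chunks_count, held constant here */
	for (int i = 1; i <= 5; i++) {
		avg = (avg + peak) / 2.0;  /* 4.0, 6.0, 7.0, 7.5, 7.75 */
		printf("after request %d: avg = %.2f\n", i, avg);
	}
	/* With the MAX() floor, avg could never fall below the configured
	 * minimum, so cached chunks survived even long idle stretches. */
	return 0;
}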
@@ -2290,7 +2278,6 @@ void zend_mm_shutdown(zend_mm_heap *heap, int full, int silent)
zend_mm_chunk_free(heap, p, ZEND_MM_CHUNK_SIZE);
heap->cached_chunks_count--;
}
-
/* clear cached chunks */
p = heap->cached_chunks;
while (p != NULL) {
@@ -2772,16 +2759,8 @@ static void alloc_globals_ctor(zend_alloc_globals *alloc_globals)
#endif
tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
- if (tmp) {
- zend_mm_use_huge_pages = zend_atoi(tmp, 0);
- if (zend_mm_use_huge_pages > 2) {
- zend_mm_use_huge_pages = 1;
- }
- }
-
- tmp = getenv("USE_ZEND_MIN_CHUNKS");
- if (tmp) {
- zend_mm_min_chunks = zend_atoi(tmp, 0);
+ if (tmp && zend_atoi(tmp, 0)) {
+ zend_mm_use_huge_pages = 1;
}
alloc_globals->mm_heap = zend_mm_init();
}
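With the revert, USE_ZEND_ALLOC_HUGE_PAGES is back to boolean semantics: any value that parses to non-zero enables huge pages, so 1 and 2 now behave identically. A minimal reproduction of the parse, with strtol standing in for zend_atoi and a hypothetical read_huge_pages_env wrapper:

#include <stdlib.h>

static int zend_mm_use_huge_pages = 0;

static void read_huge_pages_env(void)
{
	/* Boolean semantics restored by this commit: "1", "2", "42" all
	 * enable huge pages; "0", non-numeric, or unset leave it off. */
	const char *tmp = getenv("USE_ZEND_ALLOC_HUGE_PAGES");
	if (tmp && strtol(tmp, NULL, 10) != 0) {
		zend_mm_use_huge_pages = 1;
	}
}

For example, USE_ZEND_ALLOC_HUGE_PAGES=1 php script.php enables the MAP_HUGETLB path, and a value of 2 no longer selects transparent huge pages separately.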