author    antirez <antirez@gmail.com>  2015-10-06 16:18:30 +0200
committer antirez <antirez@gmail.com>  2015-10-06 16:55:37 +0200
commit    a9951b1b6a326532163e0fe4ee1a26e972258a1e (patch)
tree      ca555f37238537175cc1b34aa62a9f873026047f /deps/jemalloc/src/chunk.c
parent    e3ded0273c43986a49ddd9d5fb4a20d187d015de (diff)
Jemalloc updated to 4.0.3.
Diffstat (limited to 'deps/jemalloc/src/chunk.c')
-rw-r--r--  deps/jemalloc/src/chunk.c | 792
1 file changed, 579 insertions(+), 213 deletions(-)
diff --git a/deps/jemalloc/src/chunk.c b/deps/jemalloc/src/chunk.c
index 90ab116ae..6ba1ca7a5 100644
--- a/deps/jemalloc/src/chunk.c
+++ b/deps/jemalloc/src/chunk.c
@@ -5,129 +5,315 @@
 /* Data. */
 
 const char *opt_dss = DSS_DEFAULT;
-size_t opt_lg_chunk = LG_CHUNK_DEFAULT;
+size_t opt_lg_chunk = 0;
 
-malloc_mutex_t chunks_mtx;
-chunk_stats_t stats_chunks;
+/* Used exclusively for gdump triggering. */
+static size_t curchunks;
+static size_t highchunks;
 
-/*
- * Trees of chunks that were previously allocated (trees differ only in node
- * ordering).  These are used when allocating chunks, in an attempt to re-use
- * address space.  Depending on function, different tree orderings are needed,
- * which is why there are two trees with the same contents.
- */
-static extent_tree_t chunks_szad_mmap;
-static extent_tree_t chunks_ad_mmap;
-static extent_tree_t chunks_szad_dss;
-static extent_tree_t chunks_ad_dss;
-
-rtree_t *chunks_rtree;
+rtree_t chunks_rtree;
 
 /* Various chunk-related settings. */
 size_t chunksize;
 size_t chunksize_mask; /* (chunksize - 1). */
 size_t chunk_npages;
-size_t map_bias;
-size_t arena_maxclass; /* Max size class for arenas. */
+
+static void *chunk_alloc_default(void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind);
+static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
+    size_t length, unsigned arena_ind);
+static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
+    size_t length, unsigned arena_ind);
+static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
+    size_t length, unsigned arena_ind);
+static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
+    size_t size_b, bool committed, unsigned arena_ind);
+static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
+    size_t size_b, bool committed, unsigned arena_ind);
+
+const chunk_hooks_t chunk_hooks_default = {
+    chunk_alloc_default,
+    chunk_dalloc_default,
+    chunk_commit_default,
+    chunk_decommit_default,
+    chunk_purge_default,
+    chunk_split_default,
+    chunk_merge_default
+};
 
 /******************************************************************************/
-/* Function prototypes for non-inline static functions. */
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
 
-static void *chunk_recycle(extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, size_t size, size_t alignment, bool base,
-    bool *zero);
-static void chunk_record(extent_tree_t *chunks_szad,
-    extent_tree_t *chunks_ad, void *chunk, size_t size);
+static void chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, bool zeroed, bool committed);
 
 /******************************************************************************/
 
+static chunk_hooks_t
+chunk_hooks_get_locked(arena_t *arena)
+{
+
+    return (arena->chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_get(arena_t *arena)
+{
+    chunk_hooks_t chunk_hooks;
+
+    malloc_mutex_lock(&arena->chunks_mtx);
+    chunk_hooks = chunk_hooks_get_locked(arena);
+    malloc_mutex_unlock(&arena->chunks_mtx);
+
+    return (chunk_hooks);
+}
+
+chunk_hooks_t
+chunk_hooks_set(arena_t *arena, const chunk_hooks_t *chunk_hooks)
+{
+    chunk_hooks_t old_chunk_hooks;
+
+    malloc_mutex_lock(&arena->chunks_mtx);
+    old_chunk_hooks = arena->chunk_hooks;
+    /*
+     * Copy each field atomically so that it is impossible for readers to
+     * see partially updated pointers.  There are places where readers only
+     * need one hook function pointer (therefore no need to copy the
+     * entirety of arena->chunk_hooks), and stale reads do not affect
+     * correctness, so they perform unlocked reads.
+     */
+#define ATOMIC_COPY_HOOK(n) do {                                        \
+    union {                                                             \
+        chunk_##n##_t **n;                                              \
+        void **v;                                                       \
+    } u;                                                                \
+    u.n = &arena->chunk_hooks.n;                                        \
+    atomic_write_p(u.v, chunk_hooks->n);                                \
+} while (0)
+    ATOMIC_COPY_HOOK(alloc);
+    ATOMIC_COPY_HOOK(dalloc);
+    ATOMIC_COPY_HOOK(commit);
+    ATOMIC_COPY_HOOK(decommit);
+    ATOMIC_COPY_HOOK(purge);
+    ATOMIC_COPY_HOOK(split);
+    ATOMIC_COPY_HOOK(merge);
+#undef ATOMIC_COPY_HOOK
+    malloc_mutex_unlock(&arena->chunks_mtx);
+
+    return (old_chunk_hooks);
+}
+
+static void
+chunk_hooks_assure_initialized_impl(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    bool locked)
+{
+    static const chunk_hooks_t uninitialized_hooks =
+        CHUNK_HOOKS_INITIALIZER;
+
+    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
+        0) {
+        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
+            chunk_hooks_get(arena);
+    }
+}
+
+static void
+chunk_hooks_assure_initialized_locked(arena_t *arena,
+    chunk_hooks_t *chunk_hooks)
+{
+
+    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, true);
+}
+
+static void
+chunk_hooks_assure_initialized(arena_t *arena, chunk_hooks_t *chunk_hooks)
+{
+
+    chunk_hooks_assure_initialized_impl(arena, chunk_hooks, false);
+}
+
+bool
+chunk_register(const void *chunk, const extent_node_t *node)
+{
+
+    assert(extent_node_addr_get(node) == chunk);
+
+    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
+        return (true);
+    if (config_prof && opt_prof) {
+        size_t size = extent_node_size_get(node);
+        size_t nadd = (size == 0) ? 1 : size / chunksize;
+        size_t cur = atomic_add_z(&curchunks, nadd);
+        size_t high = atomic_read_z(&highchunks);
+        while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
+            /*
+             * Don't refresh cur, because it may have decreased
+             * since this thread lost the highchunks update race.
+             */
+            high = atomic_read_z(&highchunks);
+        }
+        if (cur > high && prof_gdump_get_unlocked())
+            prof_gdump();
+    }
+
+    return (false);
+}
+
+void
+chunk_deregister(const void *chunk, const extent_node_t *node)
+{
+    bool err;
+
+    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
+    assert(!err);
+    if (config_prof && opt_prof) {
+        size_t size = extent_node_size_get(node);
+        size_t nsub = (size == 0) ? 1 : size / chunksize;
+        assert(atomic_read_z(&curchunks) >= nsub);
+        atomic_sub_z(&curchunks, nsub);
+    }
+}
+
+/*
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
+ */
+static extent_node_t *
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
+    extent_tree_t *chunks_ad, size_t size)
+{
+    extent_node_t key;
+
+    assert(size == CHUNK_CEILING(size));
+
+    extent_node_init(&key, arena, NULL, size, false, false);
+    return (extent_tree_szad_nsearch(chunks_szad, &key));
+}
+
 static void *
-chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
-    size_t alignment, bool base, bool *zero)
+chunk_recycle(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit,
+    bool dalloc_node)
 {
     void *ret;
     extent_node_t *node;
-    extent_node_t key;
     size_t alloc_size, leadsize, trailsize;
-    bool zeroed;
+    bool zeroed, committed;
 
-    if (base) {
-        /*
-         * This function may need to call base_node_{,de}alloc(), but
-         * the current chunk allocation request is on behalf of the
-         * base allocator.  Avoid deadlock (and if that weren't an
-         * issue, potential for infinite recursion) by returning NULL.
-         */
-        return (NULL);
-    }
+    assert(new_addr == NULL || alignment == chunksize);
+    /*
+     * Cached chunks use the node linkage embedded in their headers, in
+     * which case dalloc_node is true, and new_addr is non-NULL because
+     * we're operating on a specific chunk.
+     */
+    assert(dalloc_node || new_addr != NULL);
 
-    alloc_size = size + alignment - chunksize;
+    alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
     /* Beware size_t wrap-around. */
     if (alloc_size < size)
        return (NULL);
-    key.addr = NULL;
-    key.size = alloc_size;
-    malloc_mutex_lock(&chunks_mtx);
-    node = extent_tree_szad_nsearch(chunks_szad, &key);
-    if (node == NULL) {
-        malloc_mutex_unlock(&chunks_mtx);
+    malloc_mutex_lock(&arena->chunks_mtx);
+    chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+    if (new_addr != NULL) {
+        extent_node_t key;
+        extent_node_init(&key, arena, new_addr, alloc_size, false,
+            false);
+        node = extent_tree_ad_search(chunks_ad, &key);
+    } else {
+        node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
+            alloc_size);
+    }
+    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
+        size)) {
+        malloc_mutex_unlock(&arena->chunks_mtx);
         return (NULL);
     }
-    leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
-        (uintptr_t)node->addr;
-    assert(node->size >= leadsize + size);
-    trailsize = node->size - leadsize - size;
-    ret = (void *)((uintptr_t)node->addr + leadsize);
-    zeroed = node->zeroed;
+    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
+        alignment) - (uintptr_t)extent_node_addr_get(node);
+    assert(new_addr == NULL || leadsize == 0);
+    assert(extent_node_size_get(node) >= leadsize + size);
+    trailsize = extent_node_size_get(node) - leadsize - size;
+    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
+    zeroed = extent_node_zeroed_get(node);
     if (zeroed)
-        *zero = true;
+        *zero = true;
+    committed = extent_node_committed_get(node);
+    if (committed)
+        *commit = true;
+    /* Split the lead. */
+    if (leadsize != 0 &&
+        chunk_hooks->split(extent_node_addr_get(node),
+        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
+        malloc_mutex_unlock(&arena->chunks_mtx);
+        return (NULL);
+    }
     /* Remove node from the tree. */
     extent_tree_szad_remove(chunks_szad, node);
     extent_tree_ad_remove(chunks_ad, node);
+    arena_chunk_cache_maybe_remove(arena, node, cache);
     if (leadsize != 0) {
         /* Insert the leading space as a smaller chunk. */
-        node->size = leadsize;
+        extent_node_size_set(node, leadsize);
         extent_tree_szad_insert(chunks_szad, node);
         extent_tree_ad_insert(chunks_ad, node);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
         node = NULL;
     }
     if (trailsize != 0) {
+        /* Split the trail. */
+        if (chunk_hooks->split(ret, size + trailsize, size,
+            trailsize, false, arena->ind)) {
+            if (dalloc_node && node != NULL)
+                arena_node_dalloc(arena, node);
+            malloc_mutex_unlock(&arena->chunks_mtx);
+            chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad,
+                cache, ret, size + trailsize, zeroed, committed);
+            return (NULL);
+        }
         /* Insert the trailing space as a smaller chunk. */
         if (node == NULL) {
-            /*
-             * An additional node is required, but
-             * base_node_alloc() can cause a new base chunk to be
-             * allocated.  Drop chunks_mtx in order to avoid
-             * deadlock, and if node allocation fails, deallocate
-             * the result before returning an error.
-             */
-            malloc_mutex_unlock(&chunks_mtx);
-            node = base_node_alloc();
+            node = arena_node_alloc(arena);
             if (node == NULL) {
-                chunk_dealloc(ret, size, true);
+                malloc_mutex_unlock(&arena->chunks_mtx);
+                chunk_record(arena, chunk_hooks, chunks_szad,
+                    chunks_ad, cache, ret, size + trailsize,
+                    zeroed, committed);
                 return (NULL);
             }
-            malloc_mutex_lock(&chunks_mtx);
         }
-        node->addr = (void *)((uintptr_t)(ret) + size);
-        node->size = trailsize;
-        node->zeroed = zeroed;
+        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
+            trailsize, zeroed, committed);
         extent_tree_szad_insert(chunks_szad, node);
         extent_tree_ad_insert(chunks_ad, node);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
         node = NULL;
     }
-    malloc_mutex_unlock(&chunks_mtx);
+    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
+        malloc_mutex_unlock(&arena->chunks_mtx);
+        chunk_record(arena, chunk_hooks, chunks_szad, chunks_ad, cache,
+            ret, size, zeroed, committed);
+        return (NULL);
+    }
+    malloc_mutex_unlock(&arena->chunks_mtx);
 
-    if (node != NULL)
-        base_node_dealloc(node);
+    assert(dalloc_node || node != NULL);
+    if (dalloc_node && node != NULL)
+        arena_node_dalloc(arena, node);
     if (*zero) {
-        if (zeroed == false)
+        if (!zeroed)
             memset(ret, 0, size);
         else if (config_debug) {
             size_t i;
             size_t *p = (size_t *)(uintptr_t)ret;
 
-            VALGRIND_MAKE_MEM_DEFINED(ret, size);
+            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
             for (i = 0; i < size / sizeof(size_t); i++)
                 assert(p[i] == 0);
         }
@@ -136,138 +322,214 @@ chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
 }
 
 /*
- * If the caller specifies (*zero == false), it is still possible to receive
- * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
- * takes advantage of this to avoid demanding zeroed chunks, but taking
- * advantage of them if they are returned.
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
+ * advantage of this to avoid demanding zeroed chunks, but taking advantage of
+ * them if they are returned.
  */
-void *
-chunk_alloc(size_t size, size_t alignment, bool base, bool *zero,
-    dss_prec_t dss_prec)
+static void *
+chunk_alloc_core(arena_t *arena, void *new_addr, size_t size, size_t alignment,
+    bool *zero, bool *commit, dss_prec_t dss_prec)
 {
     void *ret;
+    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
 
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
     assert(alignment != 0);
     assert((alignment & chunksize_mask) == 0);
 
+    /* Retained. */
+    if ((ret = chunk_recycle(arena, &chunk_hooks,
+        &arena->chunks_szad_retained, &arena->chunks_ad_retained, false,
+        new_addr, size, alignment, zero, commit, true)) != NULL)
+        return (ret);
+
     /* "primary" dss. */
-    if (config_dss && dss_prec == dss_prec_primary) {
-        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
-            alignment, base, zero)) != NULL)
-            goto label_return;
-        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-            goto label_return;
-    }
-    /* mmap. */
-    if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
-        alignment, base, zero)) != NULL)
-        goto label_return;
-    if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
-        goto label_return;
+    if (have_dss && dss_prec == dss_prec_primary && (ret =
+        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+        NULL)
+        return (ret);
+    /*
+     * mmap.  Requesting an address is not implemented for
+     * chunk_alloc_mmap(), so only call it if (new_addr == NULL).
+     */
+    if (new_addr == NULL && (ret = chunk_alloc_mmap(size, alignment, zero,
+        commit)) != NULL)
+        return (ret);
     /* "secondary" dss. */
-    if (config_dss && dss_prec == dss_prec_secondary) {
-        if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
-            alignment, base, zero)) != NULL)
-            goto label_return;
-        if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
-            goto label_return;
-    }
+    if (have_dss && dss_prec == dss_prec_secondary && (ret =
+        chunk_alloc_dss(arena, new_addr, size, alignment, zero, commit)) !=
+        NULL)
+        return (ret);
 
     /* All strategies for allocation failed. */
-    ret = NULL;
-label_return:
-    if (ret != NULL) {
-        if (config_ivsalloc && base == false) {
-            if (rtree_set(chunks_rtree, (uintptr_t)ret, 1)) {
-                chunk_dealloc(ret, size, true);
-                return (NULL);
-            }
-        }
-        if (config_stats || config_prof) {
-            bool gdump;
-            malloc_mutex_lock(&chunks_mtx);
-            if (config_stats)
-                stats_chunks.nchunks += (size / chunksize);
-            stats_chunks.curchunks += (size / chunksize);
-            if (stats_chunks.curchunks > stats_chunks.highchunks) {
-                stats_chunks.highchunks =
-                    stats_chunks.curchunks;
-                if (config_prof)
-                    gdump = true;
-            } else if (config_prof)
-                gdump = false;
-            malloc_mutex_unlock(&chunks_mtx);
-            if (config_prof && opt_prof && opt_prof_gdump && gdump)
-                prof_gdump();
-        }
-        if (config_valgrind)
-            VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
-    }
-    assert(CHUNK_ADDR2BASE(ret) == ret);
+    return (NULL);
+}
+
+void *
+chunk_alloc_base(size_t size)
+{
+    void *ret;
+    bool zero, commit;
+
+    /*
+     * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
+     * because it's critical that chunk_alloc_base() return untouched
+     * demand-zeroed virtual memory.
+     */
+    zero = true;
+    commit = true;
+    ret = chunk_alloc_mmap(size, chunksize, &zero, &commit);
+    if (ret == NULL)
+        return (NULL);
+    if (config_valgrind)
+        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+    return (ret);
 }
 
-static void
-chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
-    size_t size)
+void *
+chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool dalloc_node)
 {
-    bool unzeroed;
-    extent_node_t *xnode, *node, *prev, *xprev, key;
+    void *ret;
+    bool commit;
+
+    assert(size != 0);
+    assert((size & chunksize_mask) == 0);
+    assert(alignment != 0);
+    assert((alignment & chunksize_mask) == 0);
 
-    unzeroed = pages_purge(chunk, size);
-    VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+    commit = true;
+    ret = chunk_recycle(arena, chunk_hooks, &arena->chunks_szad_cached,
+        &arena->chunks_ad_cached, true, new_addr, size, alignment, zero,
+        &commit, dalloc_node);
+    if (ret == NULL)
+        return (NULL);
+    assert(commit);
+    if (config_valgrind)
+        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
+    return (ret);
+}
+
+static arena_t *
+chunk_arena_get(unsigned arena_ind)
+{
+    arena_t *arena;
 
+    /* Dodge tsd for a0 in order to avoid bootstrapping issues. */
+    arena = (arena_ind == 0) ? a0get() : arena_get(tsd_fetch(), arena_ind,
+        false, true);
     /*
-     * Allocate a node before acquiring chunks_mtx even though it might not
-     * be needed, because base_node_alloc() may cause a new base chunk to
-     * be allocated, which could cause deadlock if chunks_mtx were already
-     * held.
+     * The arena we're allocating on behalf of must have been initialized
+     * already.
+     */
-    xnode = base_node_alloc();
-    /* Use xprev to implement conditional deferred deallocation of prev. */
-    xprev = NULL;
+    assert(arena != NULL);
+    return (arena);
+}
+
+static void *
+chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
+    bool *commit, unsigned arena_ind)
+{
+    void *ret;
+    arena_t *arena;
+
+    arena = chunk_arena_get(arena_ind);
+    ret = chunk_alloc_core(arena, new_addr, size, alignment, zero,
+        commit, arena->dss_prec);
+    if (ret == NULL)
+        return (NULL);
+    if (config_valgrind)
+        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
 
-    malloc_mutex_lock(&chunks_mtx);
-    key.addr = (void *)((uintptr_t)chunk + size);
+    return (ret);
+}
+
+void *
+chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr,
+    size_t size, size_t alignment, bool *zero, bool *commit)
+{
+    void *ret;
+
+    chunk_hooks_assure_initialized(arena, chunk_hooks);
+    ret = chunk_hooks->alloc(new_addr, size, alignment, zero, commit,
+        arena->ind);
+    if (ret == NULL)
+        return (NULL);
+    if (config_valgrind && chunk_hooks->alloc != chunk_alloc_default)
+        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
+    return (ret);
+}
+
+static void
+chunk_record(arena_t *arena, chunk_hooks_t *chunk_hooks,
+    extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, bool cache,
+    void *chunk, size_t size, bool zeroed, bool committed)
+{
+    bool unzeroed;
+    extent_node_t *node, *prev;
+    extent_node_t key;
+
+    assert(!cache || !zeroed);
+    unzeroed = cache || !zeroed;
+    JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+
+    malloc_mutex_lock(&arena->chunks_mtx);
+    chunk_hooks_assure_initialized_locked(arena, chunk_hooks);
+    extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0,
+        false, false);
     node = extent_tree_ad_nsearch(chunks_ad, &key);
     /* Try to coalesce forward. */
-    if (node != NULL && node->addr == key.addr) {
+    if (node != NULL && extent_node_addr_get(node) ==
+        extent_node_addr_get(&key) && extent_node_committed_get(node) ==
+        committed && !chunk_hooks->merge(chunk, size,
+        extent_node_addr_get(node), extent_node_size_get(node), false,
+        arena->ind)) {
         /*
          * Coalesce chunk with the following address range.  This does
          * not change the position within chunks_ad, so only
          * remove/insert from/into chunks_szad.
          */
         extent_tree_szad_remove(chunks_szad, node);
-        node->addr = chunk;
-        node->size += size;
-        node->zeroed = (node->zeroed && (unzeroed == false));
+        arena_chunk_cache_maybe_remove(arena, node, cache);
+        extent_node_addr_set(node, chunk);
+        extent_node_size_set(node, size + extent_node_size_get(node));
+        extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
+            !unzeroed);
         extent_tree_szad_insert(chunks_szad, node);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
     } else {
         /* Coalescing forward failed, so insert a new node. */
-        if (xnode == NULL) {
+        node = arena_node_alloc(arena);
+        if (node == NULL) {
             /*
-             * base_node_alloc() failed, which is an exceedingly
-             * unlikely failure.  Leak chunk; its pages have
-             * already been purged, so this is only a virtual
-             * memory leak.
+             * Node allocation failed, which is an exceedingly
+             * unlikely failure.  Leak chunk after making sure its
+             * pages have already been purged, so that this is only
+             * a virtual memory leak.
              */
+            if (cache) {
+                chunk_purge_wrapper(arena, chunk_hooks, chunk,
+                    size, 0, size);
+            }
             goto label_return;
         }
-        node = xnode;
-        xnode = NULL; /* Prevent deallocation below. */
-        node->addr = chunk;
-        node->size = size;
-        node->zeroed = (unzeroed == false);
+        extent_node_init(node, arena, chunk, size, !unzeroed,
+            committed);
         extent_tree_ad_insert(chunks_ad, node);
         extent_tree_szad_insert(chunks_szad, node);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
     }
 
     /* Try to coalesce backward. */
     prev = extent_tree_ad_prev(chunks_ad, node);
-    if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
-        chunk) {
+    if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
+        extent_node_size_get(prev)) == chunk &&
+        extent_node_committed_get(prev) == committed &&
+        !chunk_hooks->merge(extent_node_addr_get(prev),
+        extent_node_size_get(prev), chunk, size, false, arena->ind)) {
         /*
          * Coalesce chunk with the previous address range.  This does
          * not change the position within chunks_ad, so only
@@ -275,44 +537,42 @@ chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
          */
         extent_tree_szad_remove(chunks_szad, prev);
         extent_tree_ad_remove(chunks_ad, prev);
-
+        arena_chunk_cache_maybe_remove(arena, prev, cache);
         extent_tree_szad_remove(chunks_szad, node);
-        node->addr = prev->addr;
-        node->size += prev->size;
-        node->zeroed = (node->zeroed && prev->zeroed);
+        arena_chunk_cache_maybe_remove(arena, node, cache);
+        extent_node_addr_set(node, extent_node_addr_get(prev));
+        extent_node_size_set(node, extent_node_size_get(prev) +
+            extent_node_size_get(node));
+        extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
+            extent_node_zeroed_get(node));
         extent_tree_szad_insert(chunks_szad, node);
+        arena_chunk_cache_maybe_insert(arena, node, cache);
 
-        xprev = prev;
+        arena_node_dalloc(arena, prev);
     }
 
 label_return:
-    malloc_mutex_unlock(&chunks_mtx);
-    /*
-     * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
-     * avoid potential deadlock.
-     */
-    if (xnode != NULL)
-        base_node_dealloc(xnode);
-    if (xprev != NULL)
-        base_node_dealloc(xprev);
+    malloc_mutex_unlock(&arena->chunks_mtx);
 }
 
 void
-chunk_unmap(void *chunk, size_t size)
+chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, bool committed)
 {
+
     assert(chunk != NULL);
     assert(CHUNK_ADDR2BASE(chunk) == chunk);
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
 
-    if (config_dss && chunk_in_dss(chunk))
-        chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
-    else if (chunk_dealloc_mmap(chunk, size))
-        chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
+    chunk_record(arena, chunk_hooks, &arena->chunks_szad_cached,
+        &arena->chunks_ad_cached, true, chunk, size, false, committed);
+    arena_maybe_purge(arena);
 }
 
 void
-chunk_dealloc(void *chunk, size_t size, bool unmap)
+chunk_dalloc_arena(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, bool zeroed, bool committed)
 {
 
     assert(chunk != NULL);
@@ -320,22 +580,149 @@ chunk_dealloc(void *chunk, size_t size, bool unmap)
     assert(size != 0);
     assert((size & chunksize_mask) == 0);
 
-    if (config_ivsalloc)
-        rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
-    if (config_stats || config_prof) {
-        malloc_mutex_lock(&chunks_mtx);
-        assert(stats_chunks.curchunks >= (size / chunksize));
-        stats_chunks.curchunks -= (size / chunksize);
-        malloc_mutex_unlock(&chunks_mtx);
+    chunk_hooks_assure_initialized(arena, chunk_hooks);
+    /* Try to deallocate. */
+    if (!chunk_hooks->dalloc(chunk, size, committed, arena->ind))
+        return;
+    /* Try to decommit; purge if that fails. */
+    if (committed) {
+        committed = chunk_hooks->decommit(chunk, size, 0, size,
+            arena->ind);
     }
+    zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
+        arena->ind);
+    chunk_record(arena, chunk_hooks, &arena->chunks_szad_retained,
+        &arena->chunks_ad_retained, false, chunk, size, zeroed, committed);
+}
+
+static bool
+chunk_dalloc_default(void *chunk, size_t size, bool committed,
+    unsigned arena_ind)
+{
+
+    if (!have_dss || !chunk_in_dss(chunk))
+        return (chunk_dalloc_mmap(chunk, size));
+    return (true);
+}
+
+void
+chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, bool committed)
+{
+
+    chunk_hooks_assure_initialized(arena, chunk_hooks);
+    chunk_hooks->dalloc(chunk, size, committed, arena->ind);
+    if (config_valgrind && chunk_hooks->dalloc != chunk_dalloc_default)
+        JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
+}
+
+static bool
+chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+    return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+        length));
+}
+
+static bool
+chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
 
-    if (unmap)
-        chunk_unmap(chunk, size);
+    return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
+        length));
+}
+
+bool
+chunk_purge_arena(arena_t *arena, void *chunk, size_t offset, size_t length)
+{
+
+    assert(chunk != NULL);
+    assert(CHUNK_ADDR2BASE(chunk) == chunk);
+    assert((offset & PAGE_MASK) == 0);
+    assert(length != 0);
+    assert((length & PAGE_MASK) == 0);
+
+    return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
+        length));
+}
+
+static bool
+chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
+    unsigned arena_ind)
+{
+
+    return (chunk_purge_arena(chunk_arena_get(arena_ind), chunk, offset,
+        length));
+}
+
+bool
+chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk,
+    size_t size, size_t offset, size_t length)
+{
+
+    chunk_hooks_assure_initialized(arena, chunk_hooks);
+    return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
+}
+
+static bool
+chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+    if (!maps_coalesce)
+        return (true);
+    return (false);
+}
+
+static bool
+chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
+    bool committed, unsigned arena_ind)
+{
+
+    if (!maps_coalesce)
+        return (true);
+    if (have_dss && chunk_in_dss(chunk_a) != chunk_in_dss(chunk_b))
+        return (true);
+
+    return (false);
+}
+
+static rtree_node_elm_t *
+chunks_rtree_node_alloc(size_t nelms)
+{
+
+    return ((rtree_node_elm_t *)base_alloc(nelms *
+        sizeof(rtree_node_elm_t)));
 }
 
 bool
 chunk_boot(void)
 {
+#ifdef _WIN32
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+
+    /*
+     * Verify actual page size is equal to or an integral multiple of
+     * configured page size.
+     */
+    if (info.dwPageSize & ((1U << LG_PAGE) - 1))
+        return (true);
+
+    /*
+     * Configure chunksize (if not set) to match granularity (usually 64K),
+     * so pages_map will always take fast path.
+     */
+    if (!opt_lg_chunk) {
+        opt_lg_chunk = jemalloc_ffs((int)info.dwAllocationGranularity)
+            - 1;
+    }
+#else
+    if (!opt_lg_chunk)
+        opt_lg_chunk = LG_CHUNK_DEFAULT;
+#endif
 
     /* Set variables according to the value of opt_lg_chunk. */
     chunksize = (ZU(1) << opt_lg_chunk);
@@ -343,23 +730,11 @@ chunk_boot(void)
     chunksize_mask = chunksize - 1;
     chunk_npages = (chunksize >> LG_PAGE);
 
-    if (config_stats || config_prof) {
-        if (malloc_mutex_init(&chunks_mtx))
-            return (true);
-        memset(&stats_chunks, 0, sizeof(chunk_stats_t));
-    }
-    if (config_dss && chunk_dss_boot())
+    if (have_dss && chunk_dss_boot())
+        return (true);
+    if (rtree_new(&chunks_rtree, (ZU(1) << (LG_SIZEOF_PTR+3)) -
+        opt_lg_chunk, chunks_rtree_node_alloc, NULL))
         return (true);
-    extent_tree_szad_new(&chunks_szad_mmap);
-    extent_tree_ad_new(&chunks_ad_mmap);
-    extent_tree_szad_new(&chunks_szad_dss);
-    extent_tree_ad_new(&chunks_ad_dss);
-    if (config_ivsalloc) {
-        chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
-            opt_lg_chunk, base_alloc, NULL);
-        if (chunks_rtree == NULL)
-            return (true);
-    }
 
     return (false);
 }
@@ -368,9 +743,6 @@ void
 chunk_prefork(void)
 {
 
-    malloc_mutex_prefork(&chunks_mtx);
-    if (config_ivsalloc)
-        rtree_prefork(chunks_rtree);
     chunk_dss_prefork();
 }
 
@@ -379,9 +751,6 @@ chunk_postfork_parent(void)
 {
 
     chunk_dss_postfork_parent();
-    if (config_ivsalloc)
-        rtree_postfork_parent(chunks_rtree);
-    malloc_mutex_postfork_parent(&chunks_mtx);
 }
 
 void
@@ -389,7 +758,4 @@ chunk_postfork_child(void)
 {
 
     chunk_dss_postfork_child();
-    if (config_ivsalloc)
-        rtree_postfork_child(chunks_rtree);
-    malloc_mutex_postfork_child(&chunks_mtx);
 }
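The headline change in this file is the replacement of the global chunk trees and stats with per-arena state plus a user-overridable chunk_hooks_t table (alloc, dalloc, commit, decommit, purge, split, merge), threaded through the new chunk_*_wrapper() functions. Jemalloc 4.x exposes this table to applications via the "arena.<i>.chunk_hooks" mallctl (implemented outside this file). Below is a minimal sketch of how a program might hook chunk allocation; the logging hook and the large allocation used to provoke a fresh chunk are illustrative assumptions, not part of this commit:

```c
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

/* Previously installed hooks, captured so our hook can delegate. */
static chunk_hooks_t orig_hooks;

/* Hypothetical logging wrapper around whatever alloc hook was in place. */
static void *
log_chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
	fprintf(stderr, "chunk alloc: %zu bytes, arena %u\n", size, arena_ind);
	return (orig_hooks.alloc(new_addr, size, alignment, zero, commit,
	    arena_ind));
}

int
main(void)
{
	chunk_hooks_t hooks;
	size_t sz = sizeof(orig_hooks);
	void *p;

	/* Read arena 0's current hooks (chunk_hooks_default from chunk.c). */
	if (mallctl("arena.0.chunk_hooks", &orig_hooks, &sz, NULL, 0) != 0)
		return (1);

	/* Override only the alloc hook; keep the other six defaults. */
	hooks = orig_hooks;
	hooks.alloc = log_chunk_alloc;
	if (mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks,
	    sizeof(hooks)) != 0)
		return (1);

	/* A chunk-sized allocation is likely to reach chunk_alloc_wrapper(). */
	p = malloc(8 << 20);
	free(p);
	return (0);
}
```

Note that chunk_hooks_set() copies each member with atomic_write_p(), so a reader that grabs a single hook pointer without holding the arena mutex can see a stale hook but never a torn one; that is what makes swapping the whole table at runtime safe under concurrent allocation.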
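Separately, the gdump accounting added in chunk_register() is a small lock-free high-watermark pattern: jemalloc's atomic_cas_z() returns true on failure, and the loop deliberately does not refresh cur after losing a race, because a competing thread can only have raised the watermark. The same idea rendered in portable C11 atomics (an illustrative sketch with hypothetical names, not jemalloc's internal atomics API):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static _Atomic size_t curchunks;
static _Atomic size_t highchunks;

/*
 * Record nadd newly registered chunks; return true if this thread
 * published a new high-water mark.
 */
static bool
chunks_note_add(size_t nadd)
{
	/* fetch_add returns the old value, so add nadd back for "current". */
	size_t cur = atomic_fetch_add(&curchunks, nadd) + nadd;
	size_t high = atomic_load(&highchunks);

	while (cur > high) {
		/*
		 * On failure, compare_exchange reloads `high` for us; cur is
		 * intentionally not re-read, matching the "don't refresh cur"
		 * comment in chunk_register() above.
		 */
		if (atomic_compare_exchange_weak(&highchunks, &high, cur))
			return (true);
	}
	return (false);
}
```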