summary refs log tree commit diff
path: root/deps/jemalloc/src/sz.c
diff options
context:
space:
mode:
Diffstat (limited to 'deps/jemalloc/src/sz.c')
-rw-r--r--  deps/jemalloc/src/sz.c  52
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/deps/jemalloc/src/sz.c b/deps/jemalloc/src/sz.c
index 8633fb050..d3115dda7 100644
--- a/deps/jemalloc/src/sz.c
+++ b/deps/jemalloc/src/sz.c
@@ -1,8 +1,57 @@
#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/sz.h"
JEMALLOC_ALIGNED(CACHELINE)
size_t sz_pind2sz_tab[SC_NPSIZES+1];
+size_t sz_large_pad;
+
+/*
+ * Round a nonzero, page-aligned size down to a psz size-class boundary,
+ * treating sz_large_pad as a fixed overhead that is excluded before the
+ * class lookup and added back afterwards.  The result never exceeds the
+ * input (asserted below).
+ */
+size_t
+sz_psz_quantize_floor(size_t size) {
+ size_t ret;
+ pszind_t pind;
+
+ assert(size > 0);
+ assert((size & PAGE_MASK) == 0);
+
+ /*
+ * NOTE(review): the +1 here and the -1 below imply sz_psz2ind() returns
+ * the index of the smallest class >= its argument, so stepping back one
+ * index yields the floor — confirm against sz.h.
+ */
+ pind = sz_psz2ind(size - sz_large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return size;
+ }
+ ret = sz_pind2sz(pind - 1) + sz_large_pad;
+ assert(ret <= size);
+ return ret;
+}
+
+/*
+ * Round a nonzero, page-aligned size up to a psz size-class boundary
+ * (sz_large_pad handled as in sz_psz_quantize_floor).  size - sz_large_pad
+ * must not exceed SC_LARGE_MAXCLASS.  When the floor already equals size,
+ * size is returned unchanged; otherwise the result is bumped to the next
+ * size class above the floor.
+ */
+size_t
+sz_psz_quantize_ceil(size_t size) {
+ size_t ret;
+
+ assert(size > 0);
+ assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ ret = sz_psz_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large extent,
+ * because under-sized extents may be mixed in. This only
+ * happens when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+ sz_large_pad;
+ }
+ return ret;
+}
static void
sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
@@ -57,7 +106,8 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) {
}
void
-sz_boot(const sc_data_t *sc_data) {
+sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
+ sz_large_pad = cache_oblivious ? PAGE : 0;
sz_boot_pind2sz_tab(sc_data);
sz_boot_index2size_tab(sc_data);
sz_boot_size2index_tab(sc_data);