author    dormando <dormando@rydia.net>  2019-07-26 01:32:26 -0700
committer dormando <dormando@rydia.net>  2019-07-26 16:26:38 -0700
commit    c630eeb7f4502a9d96cbec7cc9bda7ce1b1f6476 (patch)
tree      3b054bd5e72b0e4da7f66455bdc01d97a9814f6b
parent    78eb7701e0823643d693c1a7a6fd8a0c75db74d8 (diff)
move mem_requested from slabs.c to items.c
mem_requested is an oddball counter: it's the total number of bytes
"actually requested" from the slab's caller. It's mainly used for a stats
counter, alerting the user that the slab factor may not be efficient if the
gap between total_chunks * chunk_size and mem_requested is large.

However, since chunked items were added it's _also_ used to help the LRU
balance itself. The total number of bytes used in the class vs the total
number of bytes in a sub-LRU is used to judge whether to move items between
sub-LRUs. This is a layer violation, forcing slabs.c to know more about how
items work, as well as EXTSTORE for calculating item sizes from headers.

Further, it turns out it wasn't necessary for item allocation: if we need
to evict an item we _always_ pull from COLD_LRU or force a move from
HOT_LRU, so the total doesn't matter there.

The total does matter in the LRU maintainer background thread, but that
thread caches mem_requested to avoid hitting the slab lock too frequently.
Since sizes_bytes[] within items.c is generally redundant with
mem_requested, we now total sizes_bytes[] from each sub-LRU before starting
a batch of LRU juggles.

This simplifies the code a bit, reduces the layer violations in slabs.c
slightly, and actually speeds up some hot paths, as a number of branches
and operations are removed completely. It also fixes an issue I was having
with the restartable memory branch :). Recalculating p->requested while
keeping a clean API is painful and slow.

NOTE: this will vary a bit compared to what mem_requested originally did,
mostly for large chunked items. For items which fit inside a single slab
chunk, the stat is identical. However, items constructed by chaining chunks
will have a single large "nbytes" value and end up in the highest slab
class. Chunked items can be capped with chunks from smaller slab classes;
you will see utilization of those chunks but not an increase in
mem_requested for them. I'm still thinking this through, but it's probably
acceptable. Large chunked items should be accounted for separately, perhaps
with new counters, so they can be discounted from normal calculations.
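For reference, the new accounting amounts to summing the per-sub-LRU byte
counters under their locks before a batch of juggles. A minimal sketch of
that pattern, condensed from the items.c hunk below (class_total_bytes is a
hypothetical helper name; in the real code this runs inline in
lru_maintainer_juggle(), interleaved with the cold-tail age check, and
lru_locks, sizes_bytes, and the slabs_clsid|LRU_TYPE id layout are the
actual conventions):

    /* Sketch: total a class's bytes from its sub-LRU counters. */
    static uint64_t class_total_bytes(const int slabs_clsid) {
        const int lru_types[] = { HOT_LRU, WARM_LRU, COLD_LRU };
        uint64_t total_bytes = 0;
        for (int i = 0; i < 3; i++) {
            int id = slabs_clsid | lru_types[i];
            pthread_mutex_lock(&lru_locks[id]);
            total_bytes += sizes_bytes[id]; /* maintained at item link/unlink */
            pthread_mutex_unlock(&lru_locks[id]);
        }
        /* Doesn't have to be exact: it's cached for a whole juggle batch
         * rather than recomputed per item, so the slab lock is never hit. */
        return total_bytes;
    }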
-rw-r--r--  doc/protocol.txt   14
-rw-r--r--  items.c            43
-rw-r--r--  memcached.c        41
-rw-r--r--  slabs.c           105
-rw-r--r--  slabs.h            10
-rw-r--r--  storage.c           2
-rw-r--r--  t/issue_42.t        5
7 files changed, 81 insertions, 139 deletions
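The protocol.txt hunk below moves the waste-formula note from the slab
stats to the item stats section. As a quick worked illustration of that
formula, with made-up numbers (all three values are assumptions, not real
output):

    /* Hypothetical numbers for the waste formula documented below. */
    #include <stdio.h>

    int main(void) {
        unsigned long long total_chunks  = 10240;    /* chunks in the class (assumed) */
        unsigned long long chunk_size    = 1184;     /* bytes per chunk (assumed) */
        unsigned long long mem_requested = 9500000;  /* bytes callers asked for (assumed) */

        /* (total_chunks * chunk_size) - mem_requested = bytes wasted in the class */
        unsigned long long wasted = total_chunks * chunk_size - mem_requested;
        printf("wasted: %llu bytes\n", wasted);      /* prints 2624160 */
        return 0;
    }

A large "wasted" value relative to the class size suggests tuning the slab
growth factor, per the note in the hunk.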
diff --git a/doc/protocol.txt b/doc/protocol.txt
index 81025b3..b5587a0 100644
--- a/doc/protocol.txt
+++ b/doc/protocol.txt
@@ -900,6 +900,7 @@ number_temp Number of items presently stored in the TEMPORARY LRU.
age_hot Age of the oldest item in HOT LRU.
age_warm Age of the oldest item in WARM LRU.
age Age of the oldest item in the LRU.
+mem_requested Number of bytes requested to be stored in this LRU[*]
evicted Number of times an item had to be evicted from the LRU
before it expired.
evicted_nonzero Number of times an item which had an explicit expire
@@ -940,6 +941,12 @@ hits_to_temp Number of get_hits to each sub-LRU.
Note this will only display information about slabs which exist, so an empty
cache will return an empty set.
+* Items are stored in a slab that is the same size or larger than the
+ item. mem_requested shows the size of all items within a
+ slab. (total_chunks * chunk_size) - mem_requested shows memory
+ wasted in a slab class. If you see a lot of waste, consider tuning
+ the slab factor.
+
Item size statistics
--------------------
@@ -1038,17 +1045,10 @@ END\r\n
| free_chunks | Chunks not yet allocated to items, or freed via delete. |
| free_chunks_end | Number of free chunks at the end of the last allocated |
| | page. |
-| mem_requested | Number of bytes requested to be stored in this slab[*]. |
| active_slabs | Total number of slab classes allocated. |
| total_malloced | Total amount of memory allocated to slab pages. |
|-----------------+----------------------------------------------------------|
-* Items are stored in a slab that is the same size or larger than the
- item. mem_requested shows the size of all items within a
- slab. (total_chunks * chunk_size) - mem_requested shows memory
- wasted in a slab class. If you see a lot of waste, consider tuning
- the slab factor.
-
Connection statistics
---------------------
diff --git a/items.c b/items.c
index 77b8296..16a8707 100644
--- a/items.c
+++ b/items.c
@@ -48,6 +48,7 @@ typedef struct {
uint64_t hits_to_warm;
uint64_t hits_to_cold;
uint64_t hits_to_temp;
+ uint64_t mem_requested;
rel_time_t evicted_time;
} itemstats_t;
@@ -128,16 +129,6 @@ int item_is_flushed(item *it) {
return 0;
}
-static unsigned int temp_lru_size(int slabs_clsid) {
- int id = CLEAR_LRU(slabs_clsid);
- id |= TEMP_LRU;
- unsigned int ret;
- pthread_mutex_lock(&lru_locks[id]);
- ret = sizes_bytes[id];
- pthread_mutex_unlock(&lru_locks[id]);
- return ret;
-}
-
/* must be locked before call */
unsigned int do_get_lru_size(uint32_t id) {
return sizes[id];
@@ -186,20 +177,19 @@ item *do_item_alloc_pull(const size_t ntotal, const unsigned int id) {
* This also gives one fewer code path for slab alloc/free
*/
for (i = 0; i < 10; i++) {
- uint64_t total_bytes;
/* Try to reclaim memory first */
if (!settings.lru_segmented) {
lru_pull_tail(id, COLD_LRU, 0, 0, 0, NULL);
}
- it = slabs_alloc(ntotal, id, &total_bytes, 0);
-
- if (settings.temp_lru)
- total_bytes -= temp_lru_size(id);
+ it = slabs_alloc(ntotal, id, 0);
if (it == NULL) {
- if (lru_pull_tail(id, COLD_LRU, total_bytes, LRU_PULL_EVICT, 0, NULL) <= 0) {
+ // We send '0' in for "total_bytes" as this routine is always
+ // pulling to evict, or forcing HOT -> COLD migration.
+ // As of this writing, total_bytes isn't at all used with COLD_LRU.
+ if (lru_pull_tail(id, COLD_LRU, 0, LRU_PULL_EVICT, 0, NULL) <= 0) {
if (settings.lru_segmented) {
- lru_pull_tail(id, HOT_LRU, total_bytes, 0, 0, NULL);
+ lru_pull_tail(id, HOT_LRU, 0, 0, 0, NULL);
} else {
break;
}
@@ -755,6 +745,7 @@ void item_stats(ADD_STAT add_stats, void *c) {
totals.moves_to_warm += itemstats[i].moves_to_warm;
totals.moves_within_lru += itemstats[i].moves_within_lru;
totals.direct_reclaims += itemstats[i].direct_reclaims;
+ totals.mem_requested += sizes_bytes[i];
size += sizes[i];
lru_size_map[x] = sizes[i];
if (lru_type_map[x] == COLD_LRU && tails[i] != NULL) {
@@ -796,6 +787,7 @@ void item_stats(ADD_STAT add_stats, void *c) {
APPEND_NUM_FMT_STAT(fmt, n, "age_warm", "%u", age_warm);
}
APPEND_NUM_FMT_STAT(fmt, n, "age", "%u", age);
+ APPEND_NUM_FMT_STAT(fmt, n, "mem_requested", "%llu", (unsigned long long)totals.mem_requested);
APPEND_NUM_FMT_STAT(fmt, n, "evicted",
"%llu", (unsigned long long)totals.evicted);
APPEND_NUM_FMT_STAT(fmt, n, "evicted_nonzero",
@@ -1363,7 +1355,7 @@ static int lru_maintainer_juggle(const int slabs_clsid) {
//unsigned int chunks_free = 0;
/* TODO: if free_chunks below high watermark, increase aggressiveness */
slabs_available_chunks(slabs_clsid, NULL,
- &total_bytes, &chunks_perslab);
+ &chunks_perslab);
if (settings.temp_lru) {
/* Only looking for reclaims. Run before we size the LRU. */
for (i = 0; i < 500; i++) {
@@ -1373,21 +1365,32 @@ static int lru_maintainer_juggle(const int slabs_clsid) {
did_moves++;
}
}
- total_bytes -= temp_lru_size(slabs_clsid);
}
rel_time_t cold_age = 0;
rel_time_t hot_age = 0;
rel_time_t warm_age = 0;
- /* If LRU is in flat mode, force items to drain into COLD via max age */
+ /* If LRU is in flat mode, force items to drain into COLD via max age of 0 */
if (settings.lru_segmented) {
pthread_mutex_lock(&lru_locks[slabs_clsid|COLD_LRU]);
if (tails[slabs_clsid|COLD_LRU]) {
cold_age = current_time - tails[slabs_clsid|COLD_LRU]->time;
}
+ // Also build up total_bytes for the classes.
+ total_bytes += sizes_bytes[slabs_clsid|COLD_LRU];
pthread_mutex_unlock(&lru_locks[slabs_clsid|COLD_LRU]);
+
hot_age = cold_age * settings.hot_max_factor;
warm_age = cold_age * settings.warm_max_factor;
+
+ // total_bytes doesn't have to be exact. cache it for the juggles.
+ pthread_mutex_lock(&lru_locks[slabs_clsid|HOT_LRU]);
+ total_bytes += sizes_bytes[slabs_clsid|HOT_LRU];
+ pthread_mutex_unlock(&lru_locks[slabs_clsid|HOT_LRU]);
+
+ pthread_mutex_lock(&lru_locks[slabs_clsid|WARM_LRU]);
+ total_bytes += sizes_bytes[slabs_clsid|WARM_LRU];
+ pthread_mutex_unlock(&lru_locks[slabs_clsid|WARM_LRU]);
}
/* Juggle HOT/WARM up to N times */
diff --git a/memcached.c b/memcached.c
index 67530b1..55651cd 100644
--- a/memcached.c
+++ b/memcached.c
@@ -108,7 +108,8 @@ static void stats_init(void);
static void server_stats(ADD_STAT add_stats, conn *c);
static void process_stat_settings(ADD_STAT add_stats, void *c);
static void conn_to_str(const conn *c, char *buf);
-
+/** Return a datum for stats in binary protocol */
+static bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c);
/* defaults */
static void settings_init(void);
@@ -3413,6 +3414,44 @@ static void process_stat_settings(ADD_STAT add_stats, void *c) {
#endif
}
+static int nz_strcmp(int nzlength, const char *nz, const char *z) {
+ int zlength=strlen(z);
+ return (zlength == nzlength) && (strncmp(nz, z, zlength) == 0) ? 0 : -1;
+}
+
+static bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
+ bool ret = true;
+
+ if (add_stats != NULL) {
+ if (!stat_type) {
+ /* prepare general statistics for the engine */
+ STATS_LOCK();
+ APPEND_STAT("bytes", "%llu", (unsigned long long)stats_state.curr_bytes);
+ APPEND_STAT("curr_items", "%llu", (unsigned long long)stats_state.curr_items);
+ APPEND_STAT("total_items", "%llu", (unsigned long long)stats.total_items);
+ STATS_UNLOCK();
+ APPEND_STAT("slab_global_page_pool", "%u", global_page_pool_size(NULL));
+ item_stats_totals(add_stats, c);
+ } else if (nz_strcmp(nkey, stat_type, "items") == 0) {
+ item_stats(add_stats, c);
+ } else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
+ slabs_stats(add_stats, c);
+ } else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
+ item_stats_sizes(add_stats, c);
+ } else if (nz_strcmp(nkey, stat_type, "sizes_enable") == 0) {
+ item_stats_sizes_enable(add_stats, c);
+ } else if (nz_strcmp(nkey, stat_type, "sizes_disable") == 0) {
+ item_stats_sizes_disable(add_stats, c);
+ } else {
+ ret = false;
+ }
+ } else {
+ ret = false;
+ }
+
+ return ret;
+}
+
static void conn_to_str(const conn *c, char *buf) {
char addr_text[MAXPATHLEN];
diff --git a/slabs.c b/slabs.c
index 4014cae..06b1da6 100644
--- a/slabs.c
+++ b/slabs.c
@@ -36,8 +36,6 @@ typedef struct {
void **slab_list; /* array of slab pointers */
unsigned int list_size; /* size of prev array */
-
- size_t requested; /* The number of requested bytes */
} slabclass_t;
static slabclass_t slabclass[MAX_NUMBER_OF_SLAB_CLASSES];
@@ -331,7 +329,7 @@ static int do_slabs_newslab(const unsigned int id) {
}
/*@null@*/
-static void *do_slabs_alloc(const size_t size, unsigned int id, uint64_t *total_bytes,
+static void *do_slabs_alloc(const size_t size, unsigned int id,
unsigned int flags) {
slabclass_t *p;
void *ret = NULL;
@@ -343,9 +341,6 @@ static void *do_slabs_alloc(const size_t size, unsigned int id, uint64_t *total_
}
p = &slabclass[id];
assert(p->sl_curr == 0 || ((item *)p->slots)->slabs_clsid == 0);
- if (total_bytes != NULL) {
- *total_bytes = p->requested;
- }
assert(size <= p->size);
/* fail unless we have space at the end of a recently allocated page,
@@ -370,7 +365,6 @@ static void *do_slabs_alloc(const size_t size, unsigned int id, uint64_t *total_
}
if (ret) {
- p->requested += size;
MEMCACHED_SLABS_ALLOCATE(size, id, p->size, ret);
} else {
MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
@@ -403,19 +397,6 @@ static void do_slabs_free_chunked(item *it, const size_t size) {
if (it->next) it->next->prev = it;
p->slots = it;
p->sl_curr++;
- // TODO: macro
-#ifdef NEED_ALIGN
- int total = it->nkey + 1 + FLAGS_SIZE(it) + sizeof(item) + sizeof(item_chunk);
- if (total % 8 != 0) {
- total += 8 - (total % 8);
- }
- p->requested -= total;
-#else
- p->requested -= it->nkey + 1 + FLAGS_SIZE(it) + sizeof(item) + sizeof(item_chunk);
-#endif
- if (settings.use_cas) {
- p->requested -= sizeof(uint64_t);
- }
item_chunk *next_chunk;
while (chunk) {
@@ -430,7 +411,6 @@ static void do_slabs_free_chunked(item *it, const size_t size) {
if (chunk->next) chunk->next->prev = chunk;
p->slots = chunk;
p->sl_curr++;
- p->requested -= chunk->size + sizeof(item_chunk);
chunk = next_chunk;
}
@@ -452,9 +432,6 @@ static void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
it = (item *)ptr;
if ((it->it_flags & ITEM_CHUNKED) == 0) {
-#ifdef EXTSTORE
- bool is_hdr = it->it_flags & ITEM_HDR;
-#endif
it->it_flags = ITEM_SLABBED;
it->slabs_clsid = 0;
it->prev = 0;
@@ -463,15 +440,6 @@ static void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
p->slots = it;
p->sl_curr++;
-#ifdef EXTSTORE
- if (!is_hdr) {
- p->requested -= size;
- } else {
- p->requested -= (size - it->nbytes) + sizeof(item_hdr);
- }
-#else
- p->requested -= size;
-#endif
} else {
do_slabs_free_chunked(it, size);
}
@@ -508,46 +476,6 @@ unsigned int global_page_pool_size(bool *mem_flag) {
return ret;
}
-static int nz_strcmp(int nzlength, const char *nz, const char *z) {
- int zlength=strlen(z);
- return (zlength == nzlength) && (strncmp(nz, z, zlength) == 0) ? 0 : -1;
-}
-
-bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
- bool ret = true;
-
- if (add_stats != NULL) {
- if (!stat_type) {
- /* prepare general statistics for the engine */
- STATS_LOCK();
- APPEND_STAT("bytes", "%llu", (unsigned long long)stats_state.curr_bytes);
- APPEND_STAT("curr_items", "%llu", (unsigned long long)stats_state.curr_items);
- APPEND_STAT("total_items", "%llu", (unsigned long long)stats.total_items);
- STATS_UNLOCK();
- pthread_mutex_lock(&slabs_lock);
- APPEND_STAT("slab_global_page_pool", "%u", slabclass[SLAB_GLOBAL_PAGE_POOL].slabs);
- pthread_mutex_unlock(&slabs_lock);
- item_stats_totals(add_stats, c);
- } else if (nz_strcmp(nkey, stat_type, "items") == 0) {
- item_stats(add_stats, c);
- } else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
- slabs_stats(add_stats, c);
- } else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
- item_stats_sizes(add_stats, c);
- } else if (nz_strcmp(nkey, stat_type, "sizes_enable") == 0) {
- item_stats_sizes_enable(add_stats, c);
- } else if (nz_strcmp(nkey, stat_type, "sizes_disable") == 0) {
- item_stats_sizes_disable(add_stats, c);
- } else {
- ret = false;
- }
- } else {
- ret = false;
- }
-
- return ret;
-}
-
/*@null@*/
static void do_slabs_stats(ADD_STAT add_stats, void *c) {
int i, total;
@@ -576,8 +504,6 @@ static void do_slabs_stats(ADD_STAT add_stats, void *c) {
APPEND_NUM_STAT(i, "free_chunks", "%u", p->sl_curr);
/* Stat is dead, but displaying zero instead of removing it. */
APPEND_NUM_STAT(i, "free_chunks_end", "%u", 0);
- APPEND_NUM_STAT(i, "mem_requested", "%llu",
- (unsigned long long)p->requested);
APPEND_NUM_STAT(i, "get_hits", "%llu",
(unsigned long long)thread_stats.slab_stats[i].get_hits);
APPEND_NUM_STAT(i, "cmd_set", "%llu",
@@ -651,12 +577,12 @@ static void memory_release() {
}
}
-void *slabs_alloc(size_t size, unsigned int id, uint64_t *total_bytes,
+void *slabs_alloc(size_t size, unsigned int id,
unsigned int flags) {
void *ret;
pthread_mutex_lock(&slabs_lock);
- ret = do_slabs_alloc(size, id, total_bytes, flags);
+ ret = do_slabs_alloc(size, id, flags);
pthread_mutex_unlock(&slabs_lock);
return ret;
}
@@ -692,22 +618,8 @@ bool slabs_adjust_mem_limit(size_t new_mem_limit) {
return ret;
}
-void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal)
-{
- pthread_mutex_lock(&slabs_lock);
- slabclass_t *p;
- if (id < POWER_SMALLEST || id > power_largest) {
- fprintf(stderr, "Internal error! Invalid slab class\n");
- abort();
- }
-
- p = &slabclass[id];
- p->requested = p->requested - old + ntotal;
- pthread_mutex_unlock(&slabs_lock);
-}
-
unsigned int slabs_available_chunks(const unsigned int id, bool *mem_flag,
- uint64_t *total_bytes, unsigned int *chunks_perslab) {
+ unsigned int *chunks_perslab) {
unsigned int ret;
slabclass_t *p;
@@ -716,8 +628,6 @@ unsigned int slabs_available_chunks(const unsigned int id, bool *mem_flag,
ret = p->sl_curr;
if (mem_flag != NULL)
*mem_flag = mem_malloced >= mem_limit ? true : false;
- if (total_bytes != NULL)
- *total_bytes = p->requested;
if (chunks_perslab != NULL)
*chunks_perslab = p->perslab;
pthread_mutex_unlock(&slabs_lock);
@@ -808,7 +718,7 @@ static void *slab_rebalance_alloc(const size_t size, unsigned int id) {
item *new_it = NULL;
for (x = 0; x < s_cls->perslab; x++) {
- new_it = do_slabs_alloc(size, id, NULL, SLABS_ALLOC_NO_NEWPAGE);
+ new_it = do_slabs_alloc(size, id, SLABS_ALLOC_NO_NEWPAGE);
/* check that memory isn't within the range to clear */
if (new_it == NULL) {
break;
@@ -818,7 +728,6 @@ static void *slab_rebalance_alloc(const size_t size, unsigned int id) {
/* Pulled something we intend to free. Mark it as freed since
* we've already done the work of unlinking it from the freelist.
*/
- s_cls->requested -= size;
new_it->refcount = 0;
new_it->it_flags = ITEM_SLABBED|ITEM_FETCHED;
#ifdef DEBUG_SLAB_MOVER
@@ -1006,7 +915,6 @@ static int slab_rebalance_move(void) {
save_item = 1;
}
pthread_mutex_unlock(&slabs_lock);
- unsigned int requested_adjust = 0;
if (save_item) {
if (ch == NULL) {
assert((new_it->it_flags & ITEM_CHUNKED) == 0);
@@ -1034,7 +942,6 @@ static int slab_rebalance_move(void) {
memcpy(ITEM_key(it), "deadbeef", 8);
#endif
slab_rebal.rescues++;
- requested_adjust = ntotal;
} else {
item_chunk *nch = (item_chunk *) new_it;
/* Chunks always have head chunk (the main it) */
@@ -1049,7 +956,6 @@ static int slab_rebalance_move(void) {
memcpy(ITEM_key((item *)ch), "deadbeef", 8);
#endif
refcount_decr(it);
- requested_adjust = s_cls->size;
}
} else {
/* restore ntotal in case we tried saving a head chunk. */
@@ -1066,7 +972,6 @@ static int slab_rebalance_move(void) {
/* Always remove the ntotal, as we added it in during
* do_slabs_alloc() when copying the item.
*/
- s_cls->requested -= requested_adjust;
break;
case MOVE_FROM_SLAB:
it->refcount = 0;
diff --git a/slabs.h b/slabs.h
index 1516f86..abb4b89 100644
--- a/slabs.h
+++ b/slabs.h
@@ -22,20 +22,14 @@ unsigned int slabs_clsid(const size_t size);
/** Allocate object of given length. 0 on error */ /*@null@*/
#define SLABS_ALLOC_NO_NEWPAGE 1
-void *slabs_alloc(const size_t size, unsigned int id, uint64_t *total_bytes, unsigned int flags);
+void *slabs_alloc(const size_t size, unsigned int id, unsigned int flags);
/** Free previously allocated object */
void slabs_free(void *ptr, size_t size, unsigned int id);
-/** Adjust the stats for memory requested */
-void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal);
-
/** Adjust global memory limit up or down */
bool slabs_adjust_mem_limit(size_t new_mem_limit);
-/** Return a datum for stats in binary protocol */
-bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c);
-
typedef struct {
unsigned int chunks_per_page;
unsigned int chunk_size;
@@ -49,7 +43,7 @@ unsigned int global_page_pool_size(bool *mem_flag);
void slabs_stats(ADD_STAT add_stats, void *c);
/* Hints as to freespace in slab class */
-unsigned int slabs_available_chunks(unsigned int id, bool *mem_flag, uint64_t *total_bytes, unsigned int *chunks_perslab);
+unsigned int slabs_available_chunks(unsigned int id, bool *mem_flag, unsigned int *chunks_perslab);
void slabs_mlock(void);
void slabs_munlock(void);
diff --git a/storage.c b/storage.c
index 8b7764a..b387e37 100644
--- a/storage.c
+++ b/storage.c
@@ -154,7 +154,7 @@ static void *storage_write_thread(void *arg) {
// Avoid extra slab lock calls during heavy writing.
chunks_free = slabs_available_chunks(x, &mem_limit_reached,
- NULL, NULL);
+ NULL);
// storage_write() will fail and cut loop after filling write buffer.
while (1) {
diff --git a/t/issue_42.t b/t/issue_42.t
index d0cb32d..b1b70b0 100644
--- a/t/issue_42.t
+++ b/t/issue_42.t
@@ -18,8 +18,9 @@ for ($key = 0; $key < $key_count; $key++) {
}
my $stats = mem_stats($sock, "slabs");
-my $req = $stats->{"1:mem_requested"};
+my $stats2 = mem_stats($sock, "items");
+my $req = $stats2->{"items:1:mem_requested"};
my $top = $stats->{"1:chunk_size"} * $key_count;
# unreasonable for the result to be < 500 bytes (min item header is 48), but
# should be less than the maximum potential number.
-ok ($req > 500 && $req < $top, "Check allocated size");
+ok ($req > 500 && $req < $top, "Check allocated size: $req $top");