summaryrefslogtreecommitdiff
path: root/slabs.c
diff options
context:
space:
mode:
author: dormando <dormando@rydia.net> 2018-02-08 16:25:50 -0800
committer: dormando <dormando@rydia.net> 2018-02-08 16:25:50 -0800
commit7f06ee82a7a2f5b8db40f08a8a88180c9038458f (patch)
treec896bee0ad450fb9d12a1c5c283b626579d059a2 /slabs.c
parent2918d09c93b572660e7c47935409e8d93efba094 (diff)
downloadmemcached-7f06ee82a7a2f5b8db40f08a8a88180c9038458f.tar.gz
extstore: revise automove algorithm
Allows reassigning memory from the global page pool to a specific class. This allows simplifying the algorithm to rely on moving memory to/from the global pool, removing hacks around relaxing free memory requirements.
Diffstat (limited to 'slabs.c')
-rw-r--r-- slabs.c | 9
1 file changed, 6 insertions, 3 deletions
diff --git a/slabs.c b/slabs.c
index 6f59b17..200d575 100644
--- a/slabs.c
+++ b/slabs.c
@@ -678,7 +678,7 @@ static int slab_rebalance_start(void) {
pthread_mutex_lock(&slabs_lock);
- if (slab_rebal.s_clsid < POWER_SMALLEST ||
+ if (slab_rebal.s_clsid < SLAB_GLOBAL_PAGE_POOL ||
slab_rebal.s_clsid > power_largest ||
slab_rebal.d_clsid < SLAB_GLOBAL_PAGE_POOL ||
slab_rebal.d_clsid > power_largest ||
@@ -707,8 +707,11 @@ static int slab_rebalance_start(void) {
(s_cls->size * s_cls->perslab);
slab_rebal.slab_pos = slab_rebal.slab_start;
slab_rebal.done = 0;
+ // Don't need to do chunk move work if page is in global pool.
+ if (slab_rebal.s_clsid == SLAB_GLOBAL_PAGE_POOL) {
+ slab_rebal.done = 1;
+ }
- /* Also tells do_item_get to search for items in this slab */
slab_rebalance_signal = 2;
if (settings.verbose > 1) {
@@ -1189,7 +1192,7 @@ static enum reassign_result_type do_slabs_reassign(int src, int dst) {
/* TODO: If we end up back at -1, return a new error type */
}
- if (src < POWER_SMALLEST || src > power_largest ||
+ if (src < SLAB_GLOBAL_PAGE_POOL || src > power_largest ||
dst < SLAB_GLOBAL_PAGE_POOL || dst > power_largest)
return REASSIGN_BADCLASS;