author     dormando <dormando@rydia.net>   2014-04-17 15:59:51 -0700
committer  dormando <dormando@rydia.net>   2014-04-17 15:59:51 -0700
commit     d7324b0bd1085c6239463f89bc9ee891795d0e46
tree       c575f045d9b6ff7f31bb5bb6f0ae574711144c28
parent     e31a591210311d0658a90a86f71563fa6d7b095c
download   memcached-1.4.18.tar.gz
fix LRU contention for first minute of uptime (tag: 1.4.18)
Noticed in benchmarks that memcached was slow for the first minute:
~350k gets/sec max, then it would jump up to millions/sec.

if (it->time < current_time - ITEM_UPDATE_INTERVAL) {

current_time starts at 2, which then underflows toward 4 billion, which is
larger than it->time, so all fetches for the first minute require shuffling
the LRU, which happens under a global lock.

This probably goofed up all short-term benchmarks, and potentially slowed
down high-traffic instances which were recently restarted.

Unfortunately this means the tests no longer exercise item_update(), so
some more work is necessary.
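The bad comparison is easy to reproduce outside memcached. A minimal
standalone sketch, assuming rel_time_t is an unsigned int as typedef'd in
memcached.h; the it_time value and the main() wrapper are illustrative,
not memcached code:

    /* With an unsigned relative clock that starts near zero,
     * current_time - ITEM_UPDATE_INTERVAL wraps to ~4 billion, so every
     * item looks old enough to be repositioned in the LRU. */
    #include <stdio.h>

    typedef unsigned int rel_time_t;   /* assumed to match memcached.h */
    #define ITEM_UPDATE_INTERVAL 60

    int main(void) {
        rel_time_t current_time = 2;   /* clock value just after startup */
        rel_time_t it_time = 2;        /* item touched in the same second */

        if (it_time < current_time - ITEM_UPDATE_INTERVAL) {
            /* taken for the whole first minute: 2 - 60 wraps to 4294967238 */
            printf("LRU bump: %u < %u\n", it_time,
                   current_time - ITEM_UPDATE_INTERVAL);
        }
        return 0;
    }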
-rw-r--r--  items.c      | 7 -------
-rw-r--r--  memcached.c  | 6 +++---
-rw-r--r--  memcached.h  | 7 +++++++
3 files changed, 10 insertions, 10 deletions
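A rough sketch of why the two-second offsets in the diff below become
ITEM_UPDATE_INTERVAL + 2, assuming the relative clock is computed as
roughly (rel_time_t)(now - process_started): starting process_started an
extra ITEM_UPDATE_INTERVAL in the past makes current_time begin at 62
rather than 2, so the subtraction in item_update() can no longer wrap.
Variable names here are illustrative:

    #include <assert.h>
    #include <time.h>

    typedef unsigned int rel_time_t;
    #define ITEM_UPDATE_INTERVAL 60

    int main(void) {
        time_t now = time(0);

        time_t old_started = now - 2;                         /* before the patch */
        time_t new_started = now - ITEM_UPDATE_INTERVAL - 2;  /* after the patch  */

        rel_time_t old_current = (rel_time_t)(now - old_started);  /* == 2  */
        rel_time_t new_current = (rel_time_t)(now - new_started);  /* == 62 */

        assert(old_current - ITEM_UPDATE_INTERVAL > old_current);  /* wrapped */
        assert(new_current - ITEM_UPDATE_INTERVAL == 2);           /* no wrap */
        return 0;
    }

The reported uptime is reduced by the same ITEM_UPDATE_INTERVAL in the diff
below, so the extra startup offset stays invisible to clients.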
diff --git a/items.c b/items.c
index ddf2035..688ad03 100644
--- a/items.c
+++ b/items.c
@@ -18,13 +18,6 @@
static void item_link_q(item *it);
static void item_unlink_q(item *it);
-/*
- * We only reposition items in the LRU queue if they haven't been repositioned
- * in this many seconds. That saves us from churning on frequently-accessed
- * items.
- */
-#define ITEM_UPDATE_INTERVAL 60
-
#define LARGEST_ID POWER_LARGEST
typedef struct {
uint64_t evicted;
diff --git a/memcached.c b/memcached.c
index f4c13a8..9864d5a 100644
--- a/memcached.c
+++ b/memcached.c
@@ -188,7 +188,7 @@ static void stats_init(void) {
/* make the time we started always be 2 seconds before we really
did, so time(0) - time.started is never zero. if so, things
like 'settings.oldest_live' which act as booleans as well as
values are now false in boolean context... */
- process_started = time(0) - 2;
+ process_started = time(0) - ITEM_UPDATE_INTERVAL - 2;
stats_prefix_init();
}
@@ -2562,7 +2562,7 @@ static void server_stats(ADD_STAT add_stats, conn *c) {
STATS_LOCK();
APPEND_STAT("pid", "%lu", (long)pid);
- APPEND_STAT("uptime", "%u", now);
+ APPEND_STAT("uptime", "%u", now - ITEM_UPDATE_INTERVAL);
APPEND_STAT("time", "%ld", now + (long)process_started);
APPEND_STAT("version", "%s", VERSION);
APPEND_STAT("libevent", "%s", event_get_version());
@@ -4708,7 +4708,7 @@ static void clock_handler(const int fd, const short which, void *arg) {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
monotonic = true;
- monotonic_start = ts.tv_sec - 2;
+ monotonic_start = ts.tv_sec - ITEM_UPDATE_INTERVAL - 2;
}
#endif
}
diff --git a/memcached.h b/memcached.h
index 578b5ee..b763f4a 100644
--- a/memcached.h
+++ b/memcached.h
@@ -63,6 +63,13 @@
/* Initial power multiplier for the hash table */
#define HASHPOWER_DEFAULT 16
+/*
+ * We only reposition items in the LRU queue if they haven't been repositioned
+ * in this many seconds. That saves us from churning on frequently-accessed
+ * items.
+ */
+#define ITEM_UPDATE_INTERVAL 60
+
/* unistd.h is here */
#if HAVE_UNISTD_H
# include <unistd.h>