path: root/extstore.c
author      dormando <dormando@rydia.net>  2021-11-15 14:31:27 -0800
committer   dormando <dormando@rydia.net>  2021-11-18 14:07:18 -0800
commit      9917c798503e14b026c67068f1406bfc23c5a5b9 (patch)
tree        001c322151d68cb49e924a0b7cd5679f2e35d940 /extstore.c
parent      be9a421fb1b217ca844777a75009645df7265e02 (diff)
download    memcached-9917c798503e14b026c67068f1406bfc23c5a5b9.tar.gz
extstore: avoid looping IO queues on submission
With a low IO thread count (i.e. 1) and a long IO queue depth (10k+) that can suddenly appear during a disk hiccup, we can cause a slowdown as each worker thread submission walks the entire queue. This fully avoids walking objects while holding any lock, though we can still do a bit of that on the IO thread's end when reading the queue.
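
For reference, a minimal standalone sketch of the tail-tracked append this commit introduces; the struct and function names below are illustrative stand-ins, not memcached's actual types or API.

#include <assert.h>
#include <stddef.h>

struct node {
    struct node *next;
};

struct queue {
    struct node *head;
    struct node *tail;      /* analogous to the new queue_tail field */
    unsigned int depth;
};

/* Append a pre-linked chain of nodes. The chain is walked once to find its
 * own tail and count its depth (done before taking the lock in the commit),
 * then spliced onto the queue via the cached tail pointer instead of
 * re-walking the whole queue under the lock. */
static void queue_append_chain(struct queue *q, struct node *chain) {
    unsigned int depth = 0;
    struct node *tail = NULL;
    for (struct node *n = chain; n != NULL; n = n->next) {
        tail = n;
        depth++;
    }
    if (tail == NULL)
        return;             /* empty chain: nothing to do */

    q->depth += depth;
    if (q->head == NULL) {
        q->head = chain;
        q->tail = tail;
    } else {
        assert(q->tail != NULL && q->tail->next == NULL);
        q->tail->next = chain;
        q->tail = tail;
    }
}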
Diffstat (limited to 'extstore.c')
-rw-r--r--    extstore.c    40
1 file changed, 24 insertions, 16 deletions
diff --git a/extstore.c b/extstore.c
index 8f32f51..f32292f 100644
--- a/extstore.c
+++ b/extstore.c
@@ -78,6 +78,7 @@ typedef struct {
     pthread_mutex_t mutex;
     pthread_cond_t cond;
     obj_io *queue;
+    obj_io *queue_tail;
     store_engine *e;
     unsigned int depth; // queue depth
 } store_io_thread;
@@ -609,28 +610,31 @@ void extstore_write(void *ptr, obj_io *io) {
  */
 int extstore_submit(void *ptr, obj_io *io) {
     store_engine *e = (store_engine *)ptr;
-    store_io_thread *t = _get_io_thread(e);
+    unsigned int depth = 0;
+    obj_io *tio = io;
+    obj_io *tail = NULL;
+    while (tio != NULL) {
+        tail = tio; // keep updating potential tail.
+        depth++;
+        tio = tio->next;
+    }
+
+    store_io_thread *t = _get_io_thread(e);
     pthread_mutex_lock(&t->mutex);
+
+    t->depth += depth;
     if (t->queue == NULL) {
         t->queue = io;
+        t->queue_tail = tail;
     } else {
-        /* Have to put the *io stack at the end of current queue.
-         * FIXME: Optimize by tracking tail.
-         */
-        obj_io *tmp = t->queue;
-        while (tmp->next != NULL) {
-            tmp = tmp->next;
-            assert(tmp != t->queue);
-        }
-        tmp->next = io;
-    }
-    // TODO: extstore_submit(ptr, io, count)
-    obj_io *tio = io;
-    while (tio != NULL) {
-        t->depth++;
-        tio = tio->next;
+        // Have to put the *io stack at the end of current queue.
+        assert(tail->next == NULL);
+        assert(t->queue_tail->next == NULL);
+        t->queue_tail->next = io;
+        t->queue_tail = tail;
     }
+
     pthread_mutex_unlock(&t->mutex);
     //pthread_mutex_lock(&t->mutex);
@@ -743,6 +747,9 @@ static void *extstore_io_thread(void *arg) {
         }
 
         // Pull and disconnect a batch from the queue
+        // Chew small batches from the queue so the IO thread picker can keep
+        // the IO queue depth even, instead of piling on threads one at a time
+        // as they gobble a queue.
         if (me->queue != NULL) {
             int i;
             obj_io *end = NULL;
@@ -752,6 +759,7 @@ static void *extstore_io_thread(void *arg) {
                 if (end->next) {
                     end = end->next;
                 } else {
+                    me->queue_tail = end->next;
                     break;
                 }
             }
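
A companion sketch of the bounded-batch pull done on the IO thread side, reusing the illustrative struct queue from the earlier sketch; BATCH_MAX stands in for the engine's internal per-pull depth limit and is not a memcached identifier. It is slightly simplified relative to the commit in that it clears the cached tail whenever the pull drains the queue.

#define BATCH_MAX 8

/* Detach at most BATCH_MAX nodes from the head of the queue. Only the batch
 * being taken is walked; if the batch drains the queue entirely, the cached
 * tail pointer is cleared so the next append starts from an empty queue,
 * otherwise the tail keeps pointing at the last node of the remainder. */
static struct node *queue_pull_batch(struct queue *q) {
    if (q->head == NULL)
        return NULL;

    struct node *batch = q->head;
    struct node *end = batch;
    unsigned int i;
    for (i = 1; i < BATCH_MAX && end->next != NULL; i++) {
        end = end->next;
    }
    q->depth -= i;
    q->head = end->next;        /* remainder of the queue, or NULL */
    if (q->head == NULL)
        q->tail = NULL;         /* batch drained the queue: clear cached tail */
    end->next = NULL;           /* disconnect the batch */
    return batch;
}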