author    vattezhang <vattezhang@163.com>    2019-03-12 21:52:20 +0800
committer vattezhang <vattezhang@163.com>    2019-03-12 21:52:20 +0800
commit    270a11143320c7dcc100d4ed0b6bfc7a4bd40b31 (patch)
tree      742dd431e7bc8a74c55ab42dd3e75ddf9f3bc8f4 /src/redis-benchmark.c
parent    0f0f787a37e6411a02d9a992ecc7bb8af7decf9a (diff)
parent    8a46d32be2eaf07b6b2e8c3757e4d9f59cd1ab64 (diff)
download  redis-270a11143320c7dcc100d4ed0b6bfc7a4bd40b31.tar.gz
Merge branch 'unstable' of github.com:antirez/redis into unstable
Diffstat (limited to 'src/redis-benchmark.c')
-rw-r--r--  src/redis-benchmark.c  937
1 file changed, 881 insertions(+), 56 deletions(-)
diff --git a/src/redis-benchmark.c b/src/redis-benchmark.c
index 4f0f3404a..e0c62f9c1 100644
--- a/src/redis-benchmark.c
+++ b/src/redis-benchmark.c
@@ -1,4 +1,4 @@
-/* Redis benchmark utility.
+/* Redis benchmark utility.
*
* Copyright (c) 2009-2012, Salvatore Sanfilippo <antirez at gmail dot com>
* All rights reserved.
@@ -40,16 +40,29 @@
#include <signal.h>
#include <assert.h>
#include <math.h>
+#include <pthread.h>
#include <sds.h> /* Use hiredis sds. */
#include "ae.h"
#include "hiredis.h"
#include "adlist.h"
+#include "dict.h"
#include "zmalloc.h"
+#include "atomicvar.h"
+#include "crc16_slottable.h"
#define UNUSED(V) ((void) V)
#define RANDPTR_INITIAL_SIZE 8
#define MAX_LATENCY_PRECISION 3
+#define MAX_THREADS 500
+#define CLUSTER_SLOTS 16384
+
+#define CLIENT_GET_EVENTLOOP(c) \
+ (c->thread_id >= 0 ? config.threads[c->thread_id]->el : config.el)
+
+struct benchmarkThread;
+struct clusterNode;
+struct redisConfig;
static struct config {
aeEventLoop *el;
@@ -82,6 +95,23 @@ static struct config {
char *tests;
char *auth;
int precision;
+ int num_threads;
+ struct benchmarkThread **threads;
+ int cluster_mode;
+ int cluster_node_count;
+ struct clusterNode **cluster_nodes;
+ struct redisConfig *redis_config;
+ int is_fetching_slots;
+ int is_updating_slots;
+ int slots_last_update;
+ /* Thread mutexes to be used as fallbacks by atomicvar.h */
+ pthread_mutex_t requests_issued_mutex;
+ pthread_mutex_t requests_finished_mutex;
+ pthread_mutex_t liveclients_mutex;
+ pthread_mutex_t is_fetching_slots_mutex;
+ pthread_mutex_t is_updating_slots_mutex;
+ pthread_mutex_t updating_slots_mutex;
+ pthread_mutex_t slots_last_update_mutex;
} config;
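
Editorial note: the pthread_mutex_t members above exist because atomicvar.h, when no compiler atomic builtins are available, falls back to macros that expect a mutex named after the protected variable. A simplified sketch of that fallback follows (illustrative only; the real header also provides the builtin-based paths):

/* Simplified sketch of the atomicvar.h pthread fallback. Token pasting
 * appends "_mutex" to the last token of the variable name, so
 * config.requests_finished resolves to config.requests_finished_mutex,
 * which is exactly what the struct above declares. */
#define atomicIncr(var, count) do { \
    pthread_mutex_lock(&var ## _mutex); \
    var += (count); \
    pthread_mutex_unlock(&var ## _mutex); \
} while(0)

#define atomicGet(var, dstvar) do { \
    pthread_mutex_lock(&var ## _mutex); \
    dstvar = var; \
    pthread_mutex_unlock(&var ## _mutex); \
} while(0)
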
typedef struct _client {
@@ -90,6 +120,9 @@ typedef struct _client {
char **randptr; /* Pointers to :rand: strings inside the command buf */
size_t randlen; /* Number of pointers in client->randptr */
size_t randfree; /* Number of unused pointers in client->randptr */
+ char **stagptr; /* Pointers to slot hashtags (cluster mode only) */
+ size_t staglen; /* Number of pointers in client->stagptr */
+ size_t stagfree; /* Number of unused pointers in client->stagptr */
size_t written; /* Bytes of 'obuf' already written */
long long start; /* Start time of a request */
long long latency; /* Request latency */
@@ -98,11 +131,66 @@ typedef struct _client {
such as auth and select are prefixed to the pipeline of
benchmark commands and discarded after the first send. */
int prefixlen; /* Size in bytes of the pending prefix commands */
+ int thread_id;
+ struct clusterNode *cluster_node;
+ int slots_last_update;
} *client;
+/* Threads. */
+
+typedef struct benchmarkThread {
+ int index;
+ pthread_t thread;
+ aeEventLoop *el;
+} benchmarkThread;
+
+/* Cluster. */
+typedef struct clusterNode {
+ char *ip;
+ int port;
+ sds name;
+ int flags;
+ sds replicate; /* Master ID if node is a slave */
+ int *slots;
+ int slots_count;
+ int current_slot_index;
+ int *updated_slots; /* Used by updateClusterSlotsConfiguration */
+ int updated_slots_count; /* Used by updateClusterSlotsConfiguration */
+ int replicas_count;
+ sds *migrating; /* An array of sds where even strings are slots and odd
+ * strings are the destination node IDs. */
+ sds *importing; /* An array of sds where even strings are slots and odd
+ * strings are the source node IDs. */
+ int migrating_count; /* Length of the migrating array (migrating slots*2) */
+ int importing_count; /* Length of the importing array (importing slots*2) */
+ struct redisConfig *redis_config;
+} clusterNode;
+
+typedef struct redisConfig {
+ sds save;
+ sds appendonly;
+} redisConfig;
+
/* Prototypes */
static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask);
static void createMissingClients(client c);
+static benchmarkThread *createBenchmarkThread(int index);
+static void freeBenchmarkThread(benchmarkThread *thread);
+static void freeBenchmarkThreads();
+static void *execBenchmarkThread(void *ptr);
+static clusterNode *createClusterNode(char *ip, int port);
+static redisConfig *getRedisConfig(const char *ip, int port,
+ const char *hostsocket);
+static void freeRedisConfig(redisConfig *cfg);
+static int fetchClusterSlotsConfiguration(client c);
+static void updateClusterSlotsConfiguration();
+int showThroughput(struct aeEventLoop *eventLoop, long long id,
+ void *clientData);
+
+/* Dict callbacks */
+static uint64_t dictSdsHash(const void *key);
+static int dictSdsKeyCompare(void *privdata, const void *key1,
+ const void *key2);
/* Implementation */
static long long ustime(void) {
@@ -125,18 +213,112 @@ static long long mstime(void) {
return mst;
}
+static uint64_t dictSdsHash(const void *key) {
+ return dictGenHashFunction((unsigned char*)key, sdslen((char*)key));
+}
+
+static int dictSdsKeyCompare(void *privdata, const void *key1,
+ const void *key2)
+{
+ int l1,l2;
+ DICT_NOTUSED(privdata);
+
+ l1 = sdslen((sds)key1);
+ l2 = sdslen((sds)key2);
+ if (l1 != l2) return 0;
+ return memcmp(key1, key2, l1) == 0;
+}
+
+/* _serverAssert is needed by dict */
+void _serverAssert(const char *estr, const char *file, int line) {
+ fprintf(stderr, "=== ASSERTION FAILED ===");
+ fprintf(stderr, "==> %s:%d '%s' is not true",file,line,estr);
+ *((char*)-1) = 'x';
+}
+
+static redisConfig *getRedisConfig(const char *ip, int port,
+ const char *hostsocket)
+{
+ redisConfig *cfg = zcalloc(sizeof(*cfg));
+ if (!cfg) return NULL;
+ redisContext *c = NULL;
+ redisReply *reply = NULL, *sub_reply = NULL;
+ if (hostsocket == NULL)
+ c = redisConnect(ip, port);
+ else
+ c = redisConnectUnix(hostsocket);
+ if (c->err) {
+ fprintf(stderr,"Could not connect to Redis at ");
+ if (hostsocket == NULL)
+ fprintf(stderr,"%s:%d: %s\n",ip,port,c->errstr);
+ else fprintf(stderr,"%s: %s\n",hostsocket,c->errstr);
+ goto fail;
+ }
+ redisAppendCommand(c, "CONFIG GET %s", "save");
+ redisAppendCommand(c, "CONFIG GET %s", "appendonly");
+ int i = 0;
+ void *r = NULL;
+ for (; i < 2; i++) {
+ int res = redisGetReply(c, &r);
+ if (reply) freeReplyObject(reply);
+ reply = ((redisReply *) r);
+ if (res != REDIS_OK || !r) goto fail;
+ if (reply->type == REDIS_REPLY_ERROR) {
+ fprintf(stderr, "ERROR: %s\n", reply->str);
+ goto fail;
+ }
+ if (reply->type != REDIS_REPLY_ARRAY || reply->elements < 2) goto fail;
+ sub_reply = reply->element[1];
+ char *value = sub_reply->str;
+ if (!value) value = "";
+ switch (i) {
+ case 0: cfg->save = sdsnew(value); break;
+ case 1: cfg->appendonly = sdsnew(value); break;
+ }
+ }
+ if (reply) freeReplyObject(reply);
+ if (c) redisFree(c);
+ return cfg;
+fail:
+ fprintf(stderr, "ERROR: failed to fetch CONFIG from ");
+ if (hostsocket == NULL)
+ fprintf(stderr, "%s:%d\n", ip, port);
+ else fprintf(stderr, "%s\n", hostsocket);
+ if (reply) freeReplyObject(reply);
+ if (c) redisFree(c);
+ zfree(cfg);
+ return NULL;
+}
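
Editorial note: getRedisConfig queues both CONFIG GET queries with redisAppendCommand and then drains the replies in order with redisGetReply. A minimal standalone sketch of the same hiredis pipelining pattern (host, port and the printed output are placeholders for illustration):

#include <stdio.h>
#include "hiredis.h"

/* Queue two commands, then read both replies in submission order. */
int example_pipeline(void) {
    redisContext *c = redisConnect("127.0.0.1", 6379);
    if (c == NULL || c->err) return -1;
    redisAppendCommand(c, "CONFIG GET %s", "save");
    redisAppendCommand(c, "CONFIG GET %s", "appendonly");
    for (int i = 0; i < 2; i++) {
        redisReply *reply = NULL;
        if (redisGetReply(c, (void **)&reply) != REDIS_OK || reply == NULL)
            break;
        if (reply->type == REDIS_REPLY_ARRAY && reply->elements >= 2)
            printf("%s = %s\n", reply->element[0]->str, reply->element[1]->str);
        freeReplyObject(reply);
    }
    redisFree(c);
    return 0;
}
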
+static void freeRedisConfig(redisConfig *cfg) {
+ if (cfg->save) sdsfree(cfg->save);
+ if (cfg->appendonly) sdsfree(cfg->appendonly);
+ zfree(cfg);
+}
+
static void freeClient(client c) {
+ aeEventLoop *el = CLIENT_GET_EVENTLOOP(c);
listNode *ln;
- aeDeleteFileEvent(config.el,c->context->fd,AE_WRITABLE);
- aeDeleteFileEvent(config.el,c->context->fd,AE_READABLE);
+ aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE);
+ aeDeleteFileEvent(el,c->context->fd,AE_READABLE);
+ if (c->thread_id >= 0) {
+ int requests_finished = 0;
+ atomicGet(config.requests_finished, requests_finished);
+ if (requests_finished >= config.requests) {
+ aeStop(el);
+ }
+ }
redisFree(c->context);
sdsfree(c->obuf);
zfree(c->randptr);
+ zfree(c->stagptr);
zfree(c);
+ if (config.num_threads) pthread_mutex_lock(&(config.liveclients_mutex));
config.liveclients--;
ln = listSearchKey(config.clients,c);
assert(ln != NULL);
listDelNode(config.clients,ln);
+ if (config.num_threads) pthread_mutex_unlock(&(config.liveclients_mutex));
}
static void freeAllClients(void) {
@@ -150,9 +332,10 @@ static void freeAllClients(void) {
}
static void resetClient(client c) {
- aeDeleteFileEvent(config.el,c->context->fd,AE_WRITABLE);
- aeDeleteFileEvent(config.el,c->context->fd,AE_READABLE);
- aeCreateFileEvent(config.el,c->context->fd,AE_WRITABLE,writeHandler,c);
+ aeEventLoop *el = CLIENT_GET_EVENTLOOP(c);
+ aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE);
+ aeDeleteFileEvent(el,c->context->fd,AE_READABLE);
+ aeCreateFileEvent(el,c->context->fd,AE_WRITABLE,writeHandler,c);
c->written = 0;
c->pending = config.pipeline;
}
@@ -173,18 +356,49 @@ static void randomizeClientKey(client c) {
}
}
+static void setClusterKeyHashTag(client c) {
+ assert(c->thread_id >= 0);
+ clusterNode *node = c->cluster_node;
+ assert(node);
+ assert(node->current_slot_index < node->slots_count);
+ int is_updating_slots = 0;
+ atomicGet(config.is_updating_slots, is_updating_slots);
+ /* If updateClusterSlotsConfiguration is currently updating the slots
+ * array, calling it here simply blocks this thread on the mutex until
+ * the update completes. Once the updating thread is done, the call is
+ * effectively a no-op, since every node's updated_slots array has
+ * already been reset to NULL. */
+ if (is_updating_slots) updateClusterSlotsConfiguration();
+ int slot = node->slots[node->current_slot_index];
+ const char *tag = crc16_slot_table[slot];
+ int taglen = strlen(tag);
+ size_t i;
+ for (i = 0; i < c->staglen; i++) {
+ char *p = c->stagptr[i] + 1;
+ p[0] = tag[0];
+ p[1] = (taglen >= 2 ? tag[1] : '}');
+ p[2] = (taglen == 3 ? tag[2] : '}');
+ }
+}
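
Editorial note: every benchmark key carries a "{tag}" placeholder; setClusterKeyHashTag overwrites the three characters between the braces, in place, with a tag from crc16_slottable.h that hashes to the chosen slot, padding with '}' when the table entry is shorter than three characters. A standalone sketch of that patching step, with a hypothetical buffer and tag value used only for illustration:

#include <string.h>

/* 'placeholder' points at the '{' of a "{tag}" occurrence inside a
 * formatted command buffer; 'tag' stands in for a crc16_slot_table entry
 * (1 to 3 characters). */
static void patch_hash_tag(char *placeholder, const char *tag) {
    size_t taglen = strlen(tag);
    char *p = placeholder + 1;          /* first char inside the braces */
    p[0] = tag[0];
    p[1] = (taglen >= 2 ? tag[1] : '}');
    p[2] = (taglen == 3 ? tag[2] : '}');
}

/* Usage sketch: patch_hash_tag(strstr(buf, "{tag}"), "abc") rewrites
 * "SET key:{tag}:__rand_int__" into "SET key:{abc}:__rand_int__", so the
 * key hashes to whichever slot the tag "abc" belongs to. */
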
+
static void clientDone(client c) {
- if (config.requests_finished == config.requests) {
+ int requests_finished = 0;
+ atomicGet(config.requests_finished, requests_finished);
+ if (requests_finished >= config.requests) {
freeClient(c);
- aeStop(config.el);
+ if (!config.num_threads && config.el) aeStop(config.el);
return;
}
if (config.keepalive) {
resetClient(c);
} else {
+ if (config.num_threads) pthread_mutex_lock(&(config.liveclients_mutex));
config.liveclients--;
createMissingClients(c);
config.liveclients++;
+ if (config.num_threads)
+ pthread_mutex_unlock(&(config.liveclients_mutex));
freeClient(c);
}
}
@@ -221,15 +435,45 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
fprintf(stderr,"Unexpected error reply, exiting...\n");
exit(1);
}
+ redisReply *r = reply;
+ int is_err = (r->type == REDIS_REPLY_ERROR);
- if (config.showerrors) {
+ if (is_err && config.showerrors) {
+ /* TODO: static lasterr_time not thread-safe */
static time_t lasterr_time = 0;
time_t now = time(NULL);
- redisReply *r = reply;
- if (r->type == REDIS_REPLY_ERROR && lasterr_time != now) {
+ if (lasterr_time != now) {
lasterr_time = now;
- printf("Error from server: %s\n", r->str);
+ if (c->cluster_node) {
+ printf("Error from server %s:%d: %s\n",
+ c->cluster_node->ip,
+ c->cluster_node->port,
+ r->str);
+ } else printf("Error from server: %s\n", r->str);
+ }
+ }
+
+ if (config.cluster_mode && is_err && c->cluster_node &&
+ (!strncmp(r->str,"MOVED",5) ||
+ !strcmp(r->str,"ASK")))
+ {
+ /* Try to update slots configuration if the key of the
+ * command is using the slot hash tag. */
+ if (c->staglen && !fetchClusterSlotsConfiguration(c))
+ exit(1);
+ /*
+ clusterNode *node = c->cluster_node;
+ assert(node);
+ if (++node->current_slot_index >= node->slots_count) {
+ if (config.showerrors) {
+ fprintf(stderr, "WARN: No more available slots in "
+ "node %s:%d\n", node->ip, node->port);
+ }
+ freeReplyObject(reply);
+ freeClient(c);
+ return;
+ }
+ */
}
freeReplyObject(reply);
@@ -249,9 +493,10 @@ static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
}
continue;
}
-
- if (config.requests_finished < config.requests)
- config.latency[config.requests_finished++] = c->latency;
+ int requests_finished = 0;
+ atomicGetIncr(config.requests_finished, requests_finished, 1);
+ if (requests_finished < config.requests)
+ config.latency[requests_finished] = c->latency;
c->pending--;
if (c->pending == 0) {
clientDone(c);
@@ -273,17 +518,20 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
/* Initialize request when nothing was written. */
if (c->written == 0) {
/* Enforce upper bound to number of requests. */
- if (config.requests_issued++ >= config.requests) {
+ int requests_issued = 0;
+ atomicGetIncr(config.requests_issued, requests_issued, 1);
+ if (requests_issued >= config.requests) {
freeClient(c);
return;
}
/* Really initialize: randomize keys and set start time. */
if (config.randomkeys) randomizeClientKey(c);
+ if (config.cluster_mode && c->staglen > 0) setClusterKeyHashTag(c);
+ atomicGet(config.slots_last_update, c->slots_last_update);
c->start = ustime();
c->latency = -1;
}
-
if (sdslen(c->obuf) > c->written) {
void *ptr = c->obuf+c->written;
ssize_t nwritten = write(c->context->fd,ptr,sdslen(c->obuf)-c->written);
@@ -295,8 +543,8 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
}
c->written += nwritten;
if (sdslen(c->obuf) == c->written) {
- aeDeleteFileEvent(config.el,c->context->fd,AE_WRITABLE);
- aeCreateFileEvent(config.el,c->context->fd,AE_READABLE,readHandler,c);
+ aeDeleteFileEvent(el,c->context->fd,AE_WRITABLE);
+ aeCreateFileEvent(el,c->context->fd,AE_READABLE,readHandler,c);
}
}
}
@@ -322,23 +570,43 @@ static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) {
* for arguments randomization.
*
* Even when cloning another client, prefix commands are applied if needed.*/
-static client createClient(char *cmd, size_t len, client from) {
+static client createClient(char *cmd, size_t len, client from, int thread_id) {
int j;
+ int is_cluster_client = (config.cluster_mode && thread_id >= 0);
client c = zmalloc(sizeof(struct _client));
- if (config.hostsocket == NULL) {
- c->context = redisConnectNonBlock(config.hostip,config.hostport);
+ const char *ip = NULL;
+ int port = 0;
+ c->cluster_node = NULL;
+ if (config.hostsocket == NULL || is_cluster_client) {
+ if (!is_cluster_client) {
+ ip = config.hostip;
+ port = config.hostport;
+ } else {
+ int node_idx = 0;
+ if (config.num_threads < config.cluster_node_count)
+ node_idx = config.liveclients % config.cluster_node_count;
+ else
+ node_idx = thread_id % config.cluster_node_count;
+ clusterNode *node = config.cluster_nodes[node_idx];
+ assert(node != NULL);
+ ip = (const char *) node->ip;
+ port = node->port;
+ c->cluster_node = node;
+ }
+ c->context = redisConnectNonBlock(ip,port);
} else {
c->context = redisConnectUnixNonBlock(config.hostsocket);
}
if (c->context->err) {
fprintf(stderr,"Could not connect to Redis at ");
- if (config.hostsocket == NULL)
- fprintf(stderr,"%s:%d: %s\n",config.hostip,config.hostport,c->context->errstr);
+ if (config.hostsocket == NULL || is_cluster_client)
+ fprintf(stderr,"%s:%d: %s\n",ip,port,c->context->errstr);
else
fprintf(stderr,"%s: %s\n",config.hostsocket,c->context->errstr);
exit(1);
}
+ c->thread_id = thread_id;
/* Suppress hiredis cleanup of unused buffers for max speed. */
c->context->reader->maxbuf = 0;
@@ -362,7 +630,7 @@ static client createClient(char *cmd, size_t len, client from) {
* buffer with the SELECT command, that will be discarded the first
* time the replies are received, so if the client is reused the
* SELECT command will not be used again. */
- if (config.dbnum != 0) {
+ if (config.dbnum != 0 && !is_cluster_client) {
c->obuf = sdscatprintf(c->obuf,"*2\r\n$6\r\nSELECT\r\n$%d\r\n%s\r\n",
(int)sdslen(config.dbnumstr),config.dbnumstr);
c->prefix_pending++;
@@ -382,6 +650,8 @@ static client createClient(char *cmd, size_t len, client from) {
c->pending = config.pipeline+c->prefix_pending;
c->randptr = NULL;
c->randlen = 0;
+ c->stagptr = NULL;
+ c->staglen = 0;
/* Find substrings in the output buffer that need to be randomized. */
if (config.randomkeys) {
@@ -412,18 +682,57 @@ static client createClient(char *cmd, size_t len, client from) {
}
}
}
+ /* If cluster mode is enabled, set slot hashtags pointers. */
+ if (config.cluster_mode) {
+ if (from) {
+ c->staglen = from->staglen;
+ c->stagfree = 0;
+ c->stagptr = zmalloc(sizeof(char*)*c->staglen);
+ /* copy the offsets. */
+ for (j = 0; j < (int)c->staglen; j++) {
+ c->stagptr[j] = c->obuf + (from->stagptr[j]-from->obuf);
+ /* Adjust for the different select prefix length. */
+ c->stagptr[j] += c->prefixlen - from->prefixlen;
+ }
+ } else {
+ char *p = c->obuf;
+
+ c->staglen = 0;
+ c->stagfree = RANDPTR_INITIAL_SIZE;
+ c->stagptr = zmalloc(sizeof(char*)*c->stagfree);
+ while ((p = strstr(p,"{tag}")) != NULL) {
+ if (c->stagfree == 0) {
+ c->stagptr = zrealloc(c->stagptr,
+ sizeof(char*) * c->staglen*2);
+ c->stagfree += c->staglen;
+ }
+ c->stagptr[c->staglen++] = p;
+ c->stagfree--;
+ p += 5; /* 5 is strlen("{tag}"). */
+ }
+ }
+ }
+ aeEventLoop *el = NULL;
+ if (thread_id < 0) el = config.el;
+ else {
+ benchmarkThread *thread = config.threads[thread_id];
+ el = thread->el;
+ }
if (config.idlemode == 0)
- aeCreateFileEvent(config.el,c->context->fd,AE_WRITABLE,writeHandler,c);
+ aeCreateFileEvent(el,c->context->fd,AE_WRITABLE,writeHandler,c);
listAddNodeTail(config.clients,c);
- config.liveclients++;
+ atomicIncr(config.liveclients, 1);
+ atomicGet(config.slots_last_update, c->slots_last_update);
return c;
}
static void createMissingClients(client c) {
int n = 0;
-
while(config.liveclients < config.numclients) {
- createClient(NULL,0,c);
+ int thread_id = -1;
+ if (config.num_threads)
+ thread_id = config.liveclients % config.num_threads;
+ createClient(NULL,0,c,thread_id);
/* Listen backlog is quite limited on most systems */
if (++n > 64) {
@@ -460,6 +769,31 @@ static void showLatencyReport(void) {
printf(" %d parallel clients\n", config.numclients);
printf(" %d bytes payload\n", config.datasize);
printf(" keep alive: %d\n", config.keepalive);
+ if (config.cluster_mode) {
+ printf(" cluster mode: yes (%d masters)\n",
+ config.cluster_node_count);
+ int m;
+ for (m = 0; m < config.cluster_node_count; m++) {
+ clusterNode *node = config.cluster_nodes[m];
+ redisConfig *cfg = node->redis_config;
+ if (cfg == NULL) continue;
+ printf(" node [%d] configuration:\n",m );
+ printf(" save: %s\n",
+ sdslen(cfg->save) ? cfg->save : "NONE");
+ printf(" appendonly: %s\n", cfg->appendonly);
+ }
+ } else {
+ if (config.redis_config) {
+ printf(" host configuration \"save\": %s\n",
+ config.redis_config->save);
+ printf(" host configuration \"appendonly\": %s\n",
+ config.redis_config->appendonly);
+ }
+ }
+ printf(" multi-thread: %s\n", (config.num_threads ? "yes" : "no"));
+ if (config.num_threads)
+ printf(" threads: %d\n", config.num_threads);
+
printf("\n");
qsort(config.latency,config.requests,sizeof(long long),compareLatency);
@@ -467,18 +801,18 @@ static void showLatencyReport(void) {
if (config.latency[i]/usbetweenlat != curlat ||
i == (config.requests-1))
{
- curlat = config.latency[i]/usbetweenlat;
- perc = ((float)(i+1)*100)/config.requests;
- printf("%.2f%% <= %.*f milliseconds\n", perc, config.precision,
- curlat/pow(10.0, config.precision));
-
/* After the 2 milliseconds latency to have percentages split
* by decimals will just add a lot of noise to the output. */
- if (config.latency[i] > 2000) {
+ if (config.latency[i] >= 2000) {
config.precision = 0;
usbetweenlat = ipow(10,
MAX_LATENCY_PRECISION-config.precision);
}
+
+ curlat = config.latency[i]/usbetweenlat;
+ perc = ((float)(i+1)*100)/config.requests;
+ printf("%.2f%% <= %.*f milliseconds\n", perc, config.precision,
+ curlat/pow(10.0, config.precision));
}
}
printf("%.2f requests per second\n\n", reqpersec);
@@ -490,21 +824,428 @@ static void showLatencyReport(void) {
}
static void benchmark(char *title, char *cmd, int len) {
+ int i;
client c;
config.title = title;
config.requests_issued = 0;
config.requests_finished = 0;
- c = createClient(cmd,len,NULL);
+ if (config.num_threads) {
+ if (config.threads) freeBenchmarkThreads();
+ config.threads = zmalloc(config.num_threads * sizeof(benchmarkThread*));
+ for (i = 0; i < config.num_threads; i++) {
+ benchmarkThread *thread = createBenchmarkThread(i);
+ config.threads[i] = thread;
+ }
+ }
+
+ int thread_id = config.num_threads > 0 ? 0 : -1;
+ c = createClient(cmd,len,NULL,thread_id);
createMissingClients(c);
config.start = mstime();
- aeMain(config.el);
+ if (!config.num_threads) aeMain(config.el);
+ else {
+ for (i = 0; i < config.num_threads; i++) {
+ benchmarkThread *t = config.threads[i];
+ if (pthread_create(&(t->thread), NULL, execBenchmarkThread, t)){
+ fprintf(stderr, "FATAL: Failed to start thread %d.\n", i);
+ exit(1);
+ }
+ }
+ for (i = 0; i < config.num_threads; i++)
+ pthread_join(config.threads[i]->thread, NULL);
+ }
config.totlatency = mstime()-config.start;
showLatencyReport();
freeAllClients();
+ if (config.threads) freeBenchmarkThreads();
+}
+
+/* Thread functions. */
+
+static benchmarkThread *createBenchmarkThread(int index) {
+ benchmarkThread *thread = zmalloc(sizeof(*thread));
+ if (thread == NULL) return NULL;
+ thread->index = index;
+ thread->el = aeCreateEventLoop(1024*10);
+ aeCreateTimeEvent(thread->el,1,showThroughput,NULL,NULL);
+ return thread;
+}
+
+static void freeBenchmarkThread(benchmarkThread *thread) {
+ if (thread->el) aeDeleteEventLoop(thread->el);
+ zfree(thread);
+}
+
+static void freeBenchmarkThreads() {
+ int i = 0;
+ for (; i < config.num_threads; i++) {
+ benchmarkThread *thread = config.threads[i];
+ if (thread) freeBenchmarkThread(thread);
+ }
+ zfree(config.threads);
+ config.threads = NULL;
+}
+
+static void *execBenchmarkThread(void *ptr) {
+ benchmarkThread *thread = (benchmarkThread *) ptr;
+ aeMain(thread->el);
+ return NULL;
+}
+
+/* Cluster helper functions. */
+
+static clusterNode *createClusterNode(char *ip, int port) {
+ clusterNode *node = zmalloc(sizeof(*node));
+ if (!node) return NULL;
+ node->ip = ip;
+ node->port = port;
+ node->name = NULL;
+ node->flags = 0;
+ node->replicate = NULL;
+ node->replicas_count = 0;
+ node->slots = zmalloc(CLUSTER_SLOTS * sizeof(int));
+ node->slots_count = 0;
+ node->current_slot_index = 0;
+ node->updated_slots = NULL;
+ node->updated_slots_count = 0;
+ node->migrating = NULL;
+ node->importing = NULL;
+ node->migrating_count = 0;
+ node->importing_count = 0;
+ node->redis_config = NULL;
+ return node;
+}
+
+static void freeClusterNode(clusterNode *node) {
+ int i;
+ if (node->name) sdsfree(node->name);
+ if (node->replicate) sdsfree(node->replicate);
+ if (node->migrating != NULL) {
+ for (i = 0; i < node->migrating_count; i++) sdsfree(node->migrating[i]);
+ zfree(node->migrating);
+ }
+ if (node->importing != NULL) {
+ for (i = 0; i < node->importing_count; i++) sdsfree(node->importing[i]);
+ zfree(node->importing);
+ }
+ /* If the node is not the reference node, that uses the address from
+ * config.hostip and config.hostport, then the node ip has been
+ * allocated by fetchClusterConfiguration, so it must be freed. */
+ if (node->ip && strcmp(node->ip, config.hostip) != 0) sdsfree(node->ip);
+ if (node->redis_config != NULL) freeRedisConfig(node->redis_config);
+ zfree(node->slots);
+ zfree(node);
+}
+
+static void freeClusterNodes() {
+ int i = 0;
+ for (; i < config.cluster_node_count; i++) {
+ clusterNode *n = config.cluster_nodes[i];
+ if (n) freeClusterNode(n);
+ }
+ zfree(config.cluster_nodes);
+ config.cluster_nodes = NULL;
+}
+
+static clusterNode **addClusterNode(clusterNode *node) {
+ int count = config.cluster_node_count + 1;
+ config.cluster_nodes = zrealloc(config.cluster_nodes,
+ count * sizeof(clusterNode *));
+ if (!config.cluster_nodes) return NULL;
+ config.cluster_nodes[config.cluster_node_count++] = node;
+ return config.cluster_nodes;
+}
+
+static int fetchClusterConfiguration() {
+ int success = 1;
+ redisContext *ctx = NULL;
+ redisReply *reply = NULL;
+ if (config.hostsocket == NULL)
+ ctx = redisConnect(config.hostip,config.hostport);
+ else
+ ctx = redisConnectUnix(config.hostsocket);
+ if (ctx->err) {
+ fprintf(stderr,"Could not connect to Redis at ");
+ if (config.hostsocket == NULL) {
+ fprintf(stderr,"%s:%d: %s\n",config.hostip,config.hostport,
+ ctx->errstr);
+ } else fprintf(stderr,"%s: %s\n",config.hostsocket,ctx->errstr);
+ exit(1);
+ }
+ clusterNode *firstNode = createClusterNode((char *) config.hostip,
+ config.hostport);
+ if (!firstNode) {success = 0; goto cleanup;}
+ reply = redisCommand(ctx, "CLUSTER NODES");
+ success = (reply != NULL);
+ if (!success) goto cleanup;
+ success = (reply->type != REDIS_REPLY_ERROR);
+ if (!success) {
+ if (config.hostsocket == NULL) {
+ fprintf(stderr, "Cluster node %s:%d replied with error:\n%s\n",
+ config.hostip, config.hostport, reply->str);
+ } else {
+ fprintf(stderr, "Cluster node %s replied with error:\n%s\n",
+ config.hostsocket, reply->str);
+ }
+ goto cleanup;
+ }
+ char *lines = reply->str, *p, *line;
+ while ((p = strstr(lines, "\n")) != NULL) {
+ *p = '\0';
+ line = lines;
+ lines = p + 1;
+ char *name = NULL, *addr = NULL, *flags = NULL, *master_id = NULL;
+ int i = 0;
+ while ((p = strchr(line, ' ')) != NULL) {
+ *p = '\0';
+ char *token = line;
+ line = p + 1;
+ switch(i++){
+ case 0: name = token; break;
+ case 1: addr = token; break;
+ case 2: flags = token; break;
+ case 3: master_id = token; break;
+ }
+ if (i == 8) break; // Slots
+ }
+ if (!flags) {
+ fprintf(stderr, "Invalid CLUSTER NODES reply: missing flags.\n");
+ success = 0;
+ goto cleanup;
+ }
+ int myself = (strstr(flags, "myself") != NULL);
+ int is_replica = (strstr(flags, "slave") != NULL ||
+ (master_id != NULL && master_id[0] != '-'));
+ if (is_replica) continue;
+ if (addr == NULL) {
+ fprintf(stderr, "Invalid CLUSTER NODES reply: missing addr.\n");
+ success = 0;
+ goto cleanup;
+ }
+ clusterNode *node = NULL;
+ char *ip = NULL;
+ int port = 0;
+ char *paddr = strchr(addr, ':');
+ if (paddr != NULL) {
+ *paddr = '\0';
+ ip = addr;
+ addr = paddr + 1;
+ /* If internal bus is specified, then just drop it. */
+ if ((paddr = strchr(addr, '@')) != NULL) *paddr = '\0';
+ port = atoi(addr);
+ }
+ if (myself) {
+ node = firstNode;
+ if (node->ip == NULL && ip != NULL) {
+ node->ip = ip;
+ node->port = port;
+ }
+ } else {
+ node = createClusterNode(sdsnew(ip), port);
+ }
+ if (node == NULL) {
+ success = 0;
+ goto cleanup;
+ }
+ if (name != NULL) node->name = sdsnew(name);
+ if (i == 8) {
+ int remaining = strlen(line);
+ while (remaining > 0) {
+ p = strchr(line, ' ');
+ if (p == NULL) p = line + remaining;
+ remaining -= (p - line);
+
+ char *slotsdef = line;
+ *p = '\0';
+ if (remaining) {
+ line = p + 1;
+ remaining--;
+ } else line = p;
+ char *dash = NULL;
+ if (slotsdef[0] == '[') {
+ slotsdef++;
+ if ((p = strstr(slotsdef, "->-"))) { // Migrating
+ *p = '\0';
+ p += 3;
+ char *closing_bracket = strchr(p, ']');
+ if (closing_bracket) *closing_bracket = '\0';
+ sds slot = sdsnew(slotsdef);
+ sds dst = sdsnew(p);
+ node->migrating_count += 2;
+ node->migrating =
+ zrealloc(node->migrating,
+ (node->migrating_count * sizeof(sds)));
+ node->migrating[node->migrating_count - 2] =
+ slot;
+ node->migrating[node->migrating_count - 1] =
+ dst;
+ } else if ((p = strstr(slotsdef, "-<-"))) {//Importing
+ *p = '\0';
+ p += 3;
+ char *closing_bracket = strchr(p, ']');
+ if (closing_bracket) *closing_bracket = '\0';
+ sds slot = sdsnew(slotsdef);
+ sds src = sdsnew(p);
+ node->importing_count += 2;
+ node->importing = zrealloc(node->importing,
+ (node->importing_count * sizeof(sds)));
+ node->importing[node->importing_count - 2] =
+ slot;
+ node->importing[node->importing_count - 1] =
+ src;
+ }
+ } else if ((dash = strchr(slotsdef, '-')) != NULL) {
+ p = dash;
+ int start, stop;
+ *p = '\0';
+ start = atoi(slotsdef);
+ stop = atoi(p + 1);
+ while (start <= stop) {
+ int slot = start++;
+ node->slots[node->slots_count++] = slot;
+ }
+ } else if (p > slotsdef) {
+ int slot = atoi(slotsdef);
+ node->slots[node->slots_count++] = slot;
+ }
+ }
+ }
+ if (node->slots_count == 0) {
+ printf("WARNING: master node %s:%d has no slots, skipping...\n",
+ node->ip, node->port);
+ continue;
+ }
+ if (!addClusterNode(node)) {
+ success = 0;
+ goto cleanup;
+ }
+ }
+cleanup:
+ if (ctx) redisFree(ctx);
+ if (!success) {
+ if (config.cluster_nodes) freeClusterNodes();
+ }
+ if (reply) freeReplyObject(reply);
+ return success;
+}
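
Editorial note: the loop above walks the text reply of CLUSTER NODES line by line. The first four space-separated fields give the node ID, address, flags and master ID, and slot descriptors start at the ninth field (index 8), which is why the inner loop breaks at i == 8. For reference, the line shape being parsed (the address and IDs below are placeholders):

/* One CLUSTER NODES line, as consumed by the parser above:
 *
 *   <id> <ip:port@cport> <flags> <master-id|-> <ping-sent> <pong-recv>
 *   <config-epoch> <link-state> <slot> <slot> ...
 *
 * e.g. "<40-hex-id> 127.0.0.1:30001@40001 myself,master - 0 0 1 connected
 *       0-5460 5462 [5463->-<dst-id>]"
 *
 * Fields 0-3 are captured as name/addr/flags/master_id; everything from
 * field 8 on is a slot descriptor: a single slot ("5462"), a range
 * ("0-5460"), or a bracketed migrating/importing entry ("[slot->-node]"
 * or "[slot-<-node]"). */
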
+
+/* Request the current cluster slots configuration by calling CLUSTER SLOTS
+ * and atomically update the slots after a successful reply. */
+static int fetchClusterSlotsConfiguration(client c) {
+ UNUSED(c);
+ int success = 1, is_fetching_slots = 0, last_update = 0;
+ size_t i;
+ atomicGet(config.slots_last_update, last_update);
+ if (c->slots_last_update < last_update) {
+ c->slots_last_update = last_update;
+ return -1;
+ }
+ redisReply *reply = NULL;
+ atomicGetIncr(config.is_fetching_slots, is_fetching_slots, 1);
+ if (is_fetching_slots) return -1; //TODO: use other codes || errno ?
+ atomicSet(config.is_fetching_slots, 1);
+ if (config.showerrors)
+ printf("Cluster slots configuration changed, fetching new one...\n");
+ const char *errmsg = "Failed to update cluster slots configuration";
+ static dictType dtype = {
+ dictSdsHash, /* hash function */
+ NULL, /* key dup */
+ NULL, /* val dup */
+ dictSdsKeyCompare, /* key compare */
+ NULL, /* key destructor */
+ NULL /* val destructor */
+ };
+ /* printf("[%d] fetchClusterSlotsConfiguration\n", c->thread_id); */
+ dict *masters = dictCreate(&dtype, NULL);
+ redisContext *ctx = NULL;
+ for (i = 0; i < (size_t) config.cluster_node_count; i++) {
+ clusterNode *node = config.cluster_nodes[i];
+ assert(node->ip != NULL);
+ assert(node->name != NULL);
+ assert(node->port);
+ /* Use first node as entry point to connect to. */
+ if (ctx == NULL) {
+ ctx = redisConnect(node->ip, node->port);
+ if (!ctx || ctx->err) {
+ success = 0;
+ if (ctx && ctx->err)
+ fprintf(stderr, "REDIS CONNECTION ERROR: %s\n", ctx->errstr);
+ goto cleanup;
+ }
+ }
+ if (node->updated_slots != NULL)
+ zfree(node->updated_slots);
+ node->updated_slots = NULL;
+ node->updated_slots_count = 0;
+ dictReplace(masters, node->name, node) ;
+ }
+ reply = redisCommand(ctx, "CLUSTER SLOTS");
+ if (reply == NULL || reply->type == REDIS_REPLY_ERROR) {
+ success = 0;
+ if (reply)
+ fprintf(stderr,"%s\nCLUSTER SLOTS ERROR: %s\n",errmsg,reply->str);
+ goto cleanup;
+ }
+ assert(reply->type == REDIS_REPLY_ARRAY);
+ for (i = 0; i < reply->elements; i++) {
+ redisReply *r = reply->element[i];
+ assert(r->type = REDIS_REPLY_ARRAY);
+ assert(r->elements >= 3);
+ int from, to, slot;
+ from = r->element[0]->integer;
+ to = r->element[1]->integer;
+ redisReply *nr = r->element[2];
+ assert(nr->type == REDIS_REPLY_ARRAY && nr->elements >= 3);
+ assert(nr->element[2]->str != NULL);
+ sds name = sdsnew(nr->element[2]->str);
+ dictEntry *entry = dictFind(masters, name);
+ if (entry == NULL) {
+ success = 0;
+ fprintf(stderr, "%s: could not find node with ID %s in current "
+ "configuration.\n", errmsg, name);
+ if (name) sdsfree(name);
+ goto cleanup;
+ }
+ sdsfree(name);
+ clusterNode *node = dictGetVal(entry);
+ if (node->updated_slots == NULL)
+ node->updated_slots = zcalloc(CLUSTER_SLOTS * sizeof(int));
+ for (slot = from; slot <= to; slot++)
+ node->updated_slots[node->updated_slots_count++] = slot;
+ }
+ updateClusterSlotsConfiguration();
+cleanup:
+ if (reply) freeReplyObject(reply);
+ if (ctx) redisFree(ctx);
+ dictRelease(masters);
+ atomicSet(config.is_fetching_slots, 0);
+ return success;
+}
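
Editorial note: the code above indexes the CLUSTER SLOTS reply positionally: element[0] and element[1] of each entry are the start and end slot, element[2] is the master node, and the master's third element is the node ID used as the dict key. A short sketch of the reply layout it relies on (printing is for illustration only):

#include <stdio.h>
#include "hiredis.h"

/* Each CLUSTER SLOTS entry is [start-slot, end-slot, master, replica...],
 * where a node is itself an array [ip, port, node-id]; the node ID is the
 * third element on Redis >= 4, which the assertions above require. */
static void walk_cluster_slots(redisReply *reply) {
    size_t i;
    for (i = 0; i < reply->elements; i++) {
        redisReply *entry = reply->element[i];
        long long from = entry->element[0]->integer;
        long long to = entry->element[1]->integer;
        redisReply *master = entry->element[2];
        printf("slots %lld-%lld -> %s:%lld (%s)\n", from, to,
               master->element[0]->str,     /* ip      */
               master->element[1]->integer, /* port    */
               master->element[2]->str);    /* node ID */
    }
}
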
+
+/* Atomically update the new slots configuration. */
+static void updateClusterSlotsConfiguration() {
+ pthread_mutex_lock(&config.is_updating_slots_mutex);
+ atomicSet(config.is_updating_slots, 1);
+ int i;
+ for (i = 0; i < config.cluster_node_count; i++) {
+ clusterNode *node = config.cluster_nodes[i];
+ if (node->updated_slots != NULL) {
+ int *oldslots = node->slots;
+ node->slots = node->updated_slots;
+ node->slots_count = node->updated_slots_count;
+ node->current_slot_index = 0;
+ node->updated_slots = NULL;
+ node->updated_slots_count = 0;
+ zfree(oldslots);
+ }
+ }
+ atomicSet(config.is_updating_slots, 0);
+ atomicIncr(config.slots_last_update, 1);
+ pthread_mutex_unlock(&config.is_updating_slots_mutex);
}
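
Editorial note: together with setClusterKeyHashTag, this function forms a simple flag-plus-mutex barrier: the updater takes is_updating_slots_mutex, raises is_updating_slots while swapping the slot arrays, clears it, and unlocks; any writer thread that observes the flag calls back into the same function, which just blocks on the mutex until the swap is done and then finds nothing left to update. A stripped-down sketch of the pattern, with hypothetical names standing in for the config fields:

#include <pthread.h>

static pthread_mutex_t update_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile int updating_flag = 0;   /* stand-in for is_updating_slots */

static void updater(void) {
    pthread_mutex_lock(&update_mutex);
    updating_flag = 1;
    /* ... swap in the new slots arrays ... */
    updating_flag = 0;
    pthread_mutex_unlock(&update_mutex);
}

static void reader(void) {
    if (updating_flag) {
        /* Block until the updater releases the mutex; by then the swap is
         * done, so this is effectively just a wait. */
        pthread_mutex_lock(&update_mutex);
        pthread_mutex_unlock(&update_mutex);
    }
    /* ... safe to read the slots arrays ... */
}
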
/* Returns number of consumed options. */
@@ -582,6 +1323,16 @@ int parseOptions(int argc, const char **argv) {
config.precision = atoi(argv[++i]);
if (config.precision < 0) config.precision = 0;
if (config.precision > MAX_LATENCY_PRECISION) config.precision = MAX_LATENCY_PRECISION;
+ } else if (!strcmp(argv[i],"--threads")) {
+ if (lastarg) goto invalid;
+ config.num_threads = atoi(argv[++i]);
+ if (config.num_threads > MAX_THREADS) {
+ printf("WARNING: too many threads, limiting threads to %d.\n",
+ MAX_THREADS);
+ config.num_threads = MAX_THREADS;
+ } else if (config.num_threads < 0) config.num_threads = 0;
+ } else if (!strcmp(argv[i],"--cluster")) {
+ config.cluster_mode = 1;
} else if (!strcmp(argv[i],"--help")) {
exit_status = 0;
goto usage;
@@ -610,6 +1361,8 @@ usage:
" -n <requests> Total number of requests (default 100000)\n"
" -d <size> Data size of SET/GET value in bytes (default 3)\n"
" --dbnum <db> SELECT the specified db number (default 0)\n"
+" --threads <num> Enable multi-thread mode.\n"
+" --cluster Enable cluster mode.\n"
" -k <boolean> 1=keep alive 0=reconnect (default 1)\n"
" -r <keyspacelen> Use random keys for SET/GET/INCR, random values for SADD\n"
" Using this option the benchmark will expand the string __rand_int__\n"
@@ -650,11 +1403,19 @@ int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData
UNUSED(eventLoop);
UNUSED(id);
UNUSED(clientData);
+ int liveclients = 0;
+ int requests_finished = 0;
+ atomicGet(config.liveclients, liveclients);
+ atomicGet(config.requests_finished, requests_finished);
- if (config.liveclients == 0 && config.requests_finished != config.requests) {
+ if (liveclients == 0 && requests_finished != config.requests) {
fprintf(stderr,"All clients disconnected... aborting.\n");
exit(1);
}
+ if (config.num_threads && requests_finished >= config.requests) {
+ aeStop(eventLoop);
+ return AE_NOMORE;
+ }
if (config.csv) return 250;
if (config.idlemode == 1) {
printf("clients: %d\r", config.liveclients);
@@ -662,7 +1423,7 @@ int showThroughput(struct aeEventLoop *eventLoop, long long id, void *clientData
return 250;
}
float dt = (float)(mstime()-config.start)/1000.0;
- float rps = (float)config.requests_finished/dt;
+ float rps = (float)requests_finished/dt;
printf("%s: %.2f\r", config.title, rps);
fflush(stdout);
return 250; /* every 250ms */
@@ -717,6 +1478,15 @@ int main(int argc, const char **argv) {
config.dbnum = 0;
config.auth = NULL;
config.precision = 1;
+ config.num_threads = 0;
+ config.threads = NULL;
+ config.cluster_mode = 0;
+ config.cluster_node_count = 0;
+ config.cluster_nodes = NULL;
+ config.redis_config = NULL;
+ config.is_fetching_slots = 0;
+ config.is_updating_slots = 0;
+ config.slots_last_update = 0;
i = parseOptions(argc,argv);
argc -= i;
@@ -724,13 +1494,65 @@ int main(int argc, const char **argv) {
config.latency = zmalloc(sizeof(long long)*config.requests);
+ if (config.cluster_mode) {
+ /* Fetch cluster configuration. */
+ if (!fetchClusterConfiguration() || !config.cluster_nodes) {
+ if (!config.hostsocket) {
+ fprintf(stderr, "Failed to fetch cluster configuration from "
+ "%s:%d\n", config.hostip, config.hostport);
+ } else {
+ fprintf(stderr, "Failed to fetch cluster configuration from "
+ "%s\n", config.hostsocket);
+ }
+ exit(1);
+ }
+ if (config.cluster_node_count <= 1) {
+ fprintf(stderr, "Invalid cluster: %d node(s).\n",
+ config.cluster_node_count);
+ exit(1);
+ }
+ printf("Cluster has %d master nodes:\n\n", config.cluster_node_count);
+ int i = 0;
+ for (; i < config.cluster_node_count; i++) {
+ clusterNode *node = config.cluster_nodes[i];
+ if (!node) {
+ fprintf(stderr, "Invalid cluster node #%d\n", i);
+ exit(1);
+ }
+ printf("Master %d: ", i);
+ if (node->name) printf("%s ", node->name);
+ printf("%s:%d\n", node->ip, node->port);
+ node->redis_config = getRedisConfig(node->ip, node->port, NULL);
+ if (node->redis_config == NULL) exit(1);
+ }
+ printf("\n");
+ /* Automatically set thread number to node count if not specified
+ * by the user. */
+ if (config.num_threads == 0)
+ config.num_threads = config.cluster_node_count;
+ } else {
+ config.redis_config =
+ getRedisConfig(config.hostip, config.hostport, config.hostsocket);
+ if (config.redis_config == NULL) exit(1);
+ }
+
+ if (config.num_threads > 0) {
+ pthread_mutex_init(&(config.requests_issued_mutex), NULL);
+ pthread_mutex_init(&(config.requests_finished_mutex), NULL);
+ pthread_mutex_init(&(config.liveclients_mutex), NULL);
+ pthread_mutex_init(&(config.is_fetching_slots_mutex), NULL);
+ pthread_mutex_init(&(config.is_updating_slots_mutex), NULL);
+ pthread_mutex_init(&(config.updating_slots_mutex), NULL);
+ pthread_mutex_init(&(config.slots_last_update_mutex), NULL);
+ }
+
if (config.keepalive == 0) {
printf("WARNING: keepalive disabled, you probably need 'echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse' for Linux and 'sudo sysctl -w net.inet.tcp.msl=1000' for Mac OS X in order to use a lot of clients/requests\n");
}
if (config.idlemode) {
printf("Creating %d idle connections and waiting forever (Ctrl+C when done)\n", config.numclients);
- c = createClient("",0,NULL); /* will never receive a reply */
+ c = createClient("",0,NULL,-1); /* will never receive a reply */
createMissingClients(c);
aeMain(config.el);
/* and will wait for every */
@@ -750,6 +1572,7 @@ int main(int argc, const char **argv) {
free(cmd);
} while(config.loop);
+ if (config.redis_config != NULL) freeRedisConfig(config.redis_config);
return 0;
}
@@ -769,63 +1592,63 @@ int main(int argc, const char **argv) {
}
if (test_is_selected("set")) {
- len = redisFormatCommand(&cmd,"SET key:__rand_int__ %s",data);
+ len = redisFormatCommand(&cmd,"SET key:{tag}:__rand_int__ %s",data);
benchmark("SET",cmd,len);
free(cmd);
}
if (test_is_selected("get")) {
- len = redisFormatCommand(&cmd,"GET key:__rand_int__");
+ len = redisFormatCommand(&cmd,"GET key:{tag}:__rand_int__");
benchmark("GET",cmd,len);
free(cmd);
}
if (test_is_selected("incr")) {
- len = redisFormatCommand(&cmd,"INCR counter:__rand_int__");
+ len = redisFormatCommand(&cmd,"INCR counter:{tag}:__rand_int__");
benchmark("INCR",cmd,len);
free(cmd);
}
if (test_is_selected("lpush")) {
- len = redisFormatCommand(&cmd,"LPUSH mylist %s",data);
+ len = redisFormatCommand(&cmd,"LPUSH mylist:{tag} %s",data);
benchmark("LPUSH",cmd,len);
free(cmd);
}
if (test_is_selected("rpush")) {
- len = redisFormatCommand(&cmd,"RPUSH mylist %s",data);
+ len = redisFormatCommand(&cmd,"RPUSH mylist:{tag} %s",data);
benchmark("RPUSH",cmd,len);
free(cmd);
}
if (test_is_selected("lpop")) {
- len = redisFormatCommand(&cmd,"LPOP mylist");
+ len = redisFormatCommand(&cmd,"LPOP mylist:{tag}");
benchmark("LPOP",cmd,len);
free(cmd);
}
if (test_is_selected("rpop")) {
- len = redisFormatCommand(&cmd,"RPOP mylist");
+ len = redisFormatCommand(&cmd,"RPOP mylist:{tag}");
benchmark("RPOP",cmd,len);
free(cmd);
}
if (test_is_selected("sadd")) {
len = redisFormatCommand(&cmd,
- "SADD myset element:__rand_int__");
+ "SADD myset:{tag} element:__rand_int__");
benchmark("SADD",cmd,len);
free(cmd);
}
if (test_is_selected("hset")) {
len = redisFormatCommand(&cmd,
- "HSET myset:__rand_int__ element:__rand_int__ %s",data);
+ "HSET myhash:{tag}:__rand_int__ element:__rand_int__ %s",data);
benchmark("HSET",cmd,len);
free(cmd);
}
if (test_is_selected("spop")) {
- len = redisFormatCommand(&cmd,"SPOP myset");
+ len = redisFormatCommand(&cmd,"SPOP myset:{tag}");
benchmark("SPOP",cmd,len);
free(cmd);
}
@@ -836,31 +1659,31 @@ int main(int argc, const char **argv) {
test_is_selected("lrange_500") ||
test_is_selected("lrange_600"))
{
- len = redisFormatCommand(&cmd,"LPUSH mylist %s",data);
+ len = redisFormatCommand(&cmd,"LPUSH mylist:{tag} %s",data);
benchmark("LPUSH (needed to benchmark LRANGE)",cmd,len);
free(cmd);
}
if (test_is_selected("lrange") || test_is_selected("lrange_100")) {
- len = redisFormatCommand(&cmd,"LRANGE mylist 0 99");
+ len = redisFormatCommand(&cmd,"LRANGE mylist:{tag} 0 99");
benchmark("LRANGE_100 (first 100 elements)",cmd,len);
free(cmd);
}
if (test_is_selected("lrange") || test_is_selected("lrange_300")) {
- len = redisFormatCommand(&cmd,"LRANGE mylist 0 299");
+ len = redisFormatCommand(&cmd,"LRANGE mylist:{tag} 0 299");
benchmark("LRANGE_300 (first 300 elements)",cmd,len);
free(cmd);
}
if (test_is_selected("lrange") || test_is_selected("lrange_500")) {
- len = redisFormatCommand(&cmd,"LRANGE mylist 0 449");
+ len = redisFormatCommand(&cmd,"LRANGE mylist:{tag} 0 449");
benchmark("LRANGE_500 (first 450 elements)",cmd,len);
free(cmd);
}
if (test_is_selected("lrange") || test_is_selected("lrange_600")) {
- len = redisFormatCommand(&cmd,"LRANGE mylist 0 599");
+ len = redisFormatCommand(&cmd,"LRANGE mylist:{tag} 0 599");
benchmark("LRANGE_600 (first 600 elements)",cmd,len);
free(cmd);
}
@@ -869,7 +1692,7 @@ int main(int argc, const char **argv) {
const char *argv[21];
argv[0] = "MSET";
for (i = 1; i < 21; i += 2) {
- argv[i] = "key:__rand_int__";
+ argv[i] = "key:{tag}:__rand_int__";
argv[i+1] = data;
}
len = redisFormatCommandArgv(&cmd,21,argv,NULL);
@@ -880,5 +1703,7 @@ int main(int argc, const char **argv) {
if (!config.csv) printf("\n");
} while(config.loop);
+ if (config.redis_config != NULL) freeRedisConfig(config.redis_config);
+
return 0;
}