author     Salvatore Sanfilippo <antirez@gmail.com>        2018-09-04 12:49:50 +0200
committer  GitHub <noreply@github.com>                     2018-09-04 12:49:50 +0200
commit     d60c17cbb3ae0a4b1a435f0b6fc986a60449ebec (patch)
tree       69a9908dd012bd54d5d217078bb83c2a751069b2 /src
parent     6c001bfc0ddf8e7a93e86dad250e416166253d85 (diff)
parent     247d2a734b0434e0f461902f64d32cb6e587709c (diff)
download   redis-d60c17cbb3ae0a4b1a435f0b6fc986a60449ebec.tar.gz
Merge pull request #5315 from soloestoy/optimize-parsing-large-bulk
networking: optimize parsing large bulk greater than 32k
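The "greater than 32k" in the subject refers to PROTO_MBULK_BIG_ARG, defined as 1024*32 in server.h. As a rough standalone illustration, not code from this patch, the sketch below shows the kind of request that reaches the branch changed here: a RESP bulk whose announced length (65536 below is a made-up number) is at or above that threshold.

/* Standalone sketch, not part of the patch: a client command such as
 *   *3\r\n$3\r\nSET\r\n$5\r\nmykey\r\n$65536\r\n<65536 bytes>\r\n
 * announces a 65536-byte bulk, which is >= PROTO_MBULK_BIG_ARG, so
 * processMultibulkBuffer() takes the big-argument branch changed below. */
#include <stdio.h>

#define PROTO_MBULK_BIG_ARG (1024*32)   /* same value as in Redis server.h */

int main(void) {
    long long ll = 65536;               /* announced bulk length (hypothetical) */
    if (ll >= PROTO_MBULK_BIG_ARG)
        printf("big bulk: pre-size the query buffer for %lld+2 bytes\n", ll);
    return 0;
}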
Diffstat (limited to 'src')
-rw-r--r--  src/networking.c | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/src/networking.c b/src/networking.c
index 0c1b3016f..91bd5a6b9 100644
--- a/src/networking.c
+++ b/src/networking.c
@@ -1294,19 +1294,22 @@ int processMultibulkBuffer(client *c) {
 
             c->qb_pos = newline-c->querybuf+2;
             if (ll >= PROTO_MBULK_BIG_ARG) {
-                size_t qblen;
-
                 /* If we are going to read a large object from network
                  * try to make it likely that it will start at c->querybuf
                  * boundary so that we can optimize object creation
-                 * avoiding a large copy of data. */
-                sdsrange(c->querybuf,c->qb_pos,-1);
-                c->qb_pos = 0;
-                qblen = sdslen(c->querybuf);
-                /* Hint the sds library about the amount of bytes this string is
-                 * going to contain. */
-                if (qblen < (size_t)ll+2)
-                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2-qblen);
+                 * avoiding a large copy of data.
+                 *
+                 * But only when the data we have not parsed is less than
+                 * or equal to ll+2. If the data length is greater than
+                 * ll+2, trimming querybuf is just a waste of time, because
+                 * at this time the querybuf contains not only our bulk. */
+                if (sdslen(c->querybuf)-c->qb_pos <= (size_t)ll+2) {
+                    sdsrange(c->querybuf,c->qb_pos,-1);
+                    c->qb_pos = 0;
+                    /* Hint the sds library about the amount of bytes this string is
+                     * going to contain. */
+                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2);
+                }
             }
             c->bulklen = ll;
         }
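The core of the change is the new guard: the query buffer is trimmed and pre-sized only when everything still unparsed belongs to the announced bulk (at most ll plus the trailing CRLF). If the buffer already holds more than that, it also contains later pipelined commands, and sdsrange() would just spend time shifting those extra bytes around. Below is a minimal standalone sketch of that decision, with a made-up helper name and sample numbers, assuming buflen, pos and bulklen stand for sdslen(c->querybuf), c->qb_pos and ll.

/* Standalone sketch, not Redis code: worth_trimming() mirrors the condition
 * added by this patch. buflen = bytes currently buffered, pos = bytes already
 * parsed, bulklen = announced bulk length ("ll"). */
#include <stdio.h>
#include <stddef.h>

static int worth_trimming(size_t buflen, size_t pos, size_t bulklen) {
    /* Trim only when everything unparsed is (part of) our bulk + CRLF. */
    return buflen - pos <= bulklen + 2;
}

int main(void) {
    /* Buffer holds just the start of a 64k bulk: trimming makes the payload
     * begin at offset 0, so object creation can later avoid a large copy. */
    printf("%d\n", worth_trimming(16 * 1024, 30, 64 * 1024));   /* prints 1 */

    /* Buffer already holds the whole bulk plus further pipelined commands:
     * trimming would only move those extra bytes around, so skip it. */
    printf("%d\n", worth_trimming(200 * 1024, 30, 64 * 1024));  /* prints 0 */
    return 0;
}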