summaryrefslogtreecommitdiff
path: root/items.c
diff options
context:
space:
mode:
authordormando <dormando@rydia.net>2018-08-07 20:22:50 -0700
committerdormando <dormando@rydia.net>2018-08-08 17:58:33 -0700
commit93dfe276c79803f1f6950ea183c260ce73ddb304 (patch)
tree96cc9bb478b560581336c855558226b0c38d4472 /items.c
parent89bf7ab1cfea2c24d08b9de697215ac7f61a0362 (diff)
downloadmemcached-93dfe276c79803f1f6950ea183c260ce73ddb304.tar.gz
expand NEED_ALIGN for chunked items
some whackarse ARM platforms on specific glibc/gcc (new?) versions trip SIGBUS while reading the header chunk for a split item. the header chunk is unfortunate magic: It lives in ITEM_data() at a random offset, is zero sized, and only exists to simplify code around finding the original slab class, and linking/relinking subchunks to an item. there's no fix to this which isn't a lot of code. I need to refactor chunked items, and attempted to do so, but couldn't come up with something I liked quickly enough. This change pads the first chunk if alignment is necessary, which wastes bytes and a little CPU, but I'm not going to worry a ton for these obscure platforms. this works with rebalancing because in the case of ITEM_CHUNKED header, it treats the item size as the size of the class it resides in, and memcpy's the item during recovery. all other cases were changes from ITEM_data to a new ITEM_schunk() inline function that is created when NEED_ALIGN is set, else it's equal to ITEM_data still.
Diffstat (limited to 'items.c')
-rw-r--r--items.c9
1 files changed, 8 insertions, 1 deletions
diff --git a/items.c b/items.c
index 01ce2a4..d4ce5a1 100644
--- a/items.c
+++ b/items.c
@@ -285,6 +285,13 @@ item *do_item_alloc(char *key, const size_t nkey, const unsigned int flags,
if (settings.use_cas) {
htotal += sizeof(uint64_t);
}
+#ifdef NEED_ALIGN
+ // header chunk needs to be padded on some systems
+ int remain = htotal % 8;
+ if (remain != 0) {
+ htotal += 8 - remain;
+ }
+#endif
hdr_id = slabs_clsid(htotal);
it = do_item_alloc_pull(htotal, hdr_id);
/* setting ITEM_CHUNKED is fine here because we aren't LINKED yet. */
@@ -336,7 +343,7 @@ item *do_item_alloc(char *key, const size_t nkey, const unsigned int flags,
/* Initialize internal chunk. */
if (it->it_flags & ITEM_CHUNKED) {
- item_chunk *chunk = (item_chunk *) ITEM_data(it);
+ item_chunk *chunk = (item_chunk *) ITEM_schunk(it);
chunk->next = 0;
chunk->prev = 0;