path: root/items.c
author     dormando <dormando@rydia.net>  2017-12-08 16:52:40 -0800
committer  dormando <dormando@rydia.net>  2017-12-08 16:52:40 -0800
commit     08ea194cd6b4d58c768b7b899f06dc304710d821 (patch)
tree       4713d4de08da026e94fc8c4d50c8b3d3b21c7365 /items.c
parent     48b07bd3200a92a82b7500b10f14843204502060 (diff)
download   memcached-08ea194cd6b4d58c768b7b899f06dc304710d821.tar.gz
extstore: fix size tracking and adjust drop_unread
Was early-evicting from the HOT/WARM LRUs for item headers because the *original* item size was being tracked, then compared to the actual byte totals for the class. Also adjusts drop_unread so it drops items which are currently in the COLD_LRU. This is expected to be used with very low compact_under values, i.e. 2-5 depending on page count and write load: if you can't defrag-compact, drop-compact. This is still subtly wrong, though, since drop_compact is now an option.
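
For illustration only, a minimal, self-contained sketch of the accounting change: when an item's data has been pushed to extstore, only the in-memory header stub should be counted toward the class's byte total, not the original value size. The struct fields, the ITEM_HDR flag value, item_hdr layout, and item_ntotal() below are simplified stand-ins, not memcached's real definitions.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for memcached's item and item_hdr (assumptions). */
    typedef struct {
        uint8_t  slabs_clsid;
        uint16_t it_flags;
        uint32_t nbytes;    /* size of the (possibly extstore-resident) data */
        uint32_t overhead;  /* stand-in for header + key + suffix bytes */
    } item;

    typedef struct { uint64_t page_id, offset; } item_hdr; /* simplified */

    #define ITEM_HDR 0x20   /* illustrative flag value, not the real one */

    /* Stand-in for ITEM_ntotal(): bytes the item would occupy with its data
     * held in memory. */
    static size_t item_ntotal(const item *it) {
        return (size_t)it->overhead + it->nbytes;
    }

    static size_t sizes_bytes[64]; /* per-class byte totals, as in items.c */

    /* The fix: for ITEM_HDR items the data bytes live in extstore, so the
     * original nbytes are replaced by sizeof(item_hdr) in the accounting. */
    static void account_link(const item *it) {
        if (it->it_flags & ITEM_HDR) {
            sizes_bytes[it->slabs_clsid] +=
                (item_ntotal(it) - it->nbytes) + sizeof(item_hdr);
        } else {
            sizes_bytes[it->slabs_clsid] += item_ntotal(it);
        }
    }

    int main(void) {
        item full = { .slabs_clsid = 5, .it_flags = 0,
                      .nbytes = 4000, .overhead = 60 };
        item hdr  = { .slabs_clsid = 5, .it_flags = ITEM_HDR,
                      .nbytes = 4000, .overhead = 60 };
        account_link(&full);
        printf("full item accounted: %zu bytes\n", sizes_bytes[5]);
        sizes_bytes[5] = 0;
        account_link(&hdr);
        printf("header item accounted: %zu bytes\n", sizes_bytes[5]);
        return 0;
    }

With the old accounting, the header item would have been charged its full original size, inflating the class's byte total and triggering early eviction from HOT/WARM; counting only the stub keeps the totals consistent with the memory actually used.
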
Diffstat (limited to 'items.c')
-rw-r--r--  items.c  18
1 file changed, 18 insertions, 0 deletions
diff --git a/items.c b/items.c
index 9f49897..5153420 100644
--- a/items.c
+++ b/items.c
@@ -392,7 +392,16 @@ static void do_item_link_q(item *it) { /* item is the new head */
*head = it;
if (*tail == 0) *tail = it;
sizes[it->slabs_clsid]++;
+#ifdef EXTSTORE
+ if (it->it_flags & ITEM_HDR) {
+ sizes_bytes[it->slabs_clsid] += (ITEM_ntotal(it) - it->nbytes) + sizeof(item_hdr);
+ } else {
+ sizes_bytes[it->slabs_clsid] += ITEM_ntotal(it);
+ }
+#else
sizes_bytes[it->slabs_clsid] += ITEM_ntotal(it);
+#endif
+
return;
}
@@ -428,7 +437,16 @@ static void do_item_unlink_q(item *it) {
if (it->next) it->next->prev = it->prev;
if (it->prev) it->prev->next = it->next;
sizes[it->slabs_clsid]--;
+#ifdef EXTSTORE
+ if (it->it_flags & ITEM_HDR) {
+ sizes_bytes[it->slabs_clsid] -= (ITEM_ntotal(it) - it->nbytes) + sizeof(item_hdr);
+ } else {
+ sizes_bytes[it->slabs_clsid] -= ITEM_ntotal(it);
+ }
+#else
sizes_bytes[it->slabs_clsid] -= ITEM_ntotal(it);
+#endif
+
return;
}