 src/block/block_compact.c | 16
 src/btree/bt_compact.c    | 32
 2 files changed, 41 insertions(+), 7 deletions(-)
diff --git a/src/block/block_compact.c b/src/block/block_compact.c
index 4fcf6ea24ff..5f26fa45f3f 100644
--- a/src/block/block_compact.c
+++ b/src/block/block_compact.c
@@ -55,7 +55,7 @@ __wt_block_compact_skip(WT_SESSION_IMPL *session, WT_BLOCK *block, int *skipp)
WT_EXT *ext;
WT_EXTLIST *el;
WT_FH *fh;
- wt_off_t avail_eighty, avail_ninety, eighty, ninety;
+ wt_off_t avail_eighty, avail_ninety, avail_total, eighty, ninety;
*skipp = 1; /* Return a default skip. */
@@ -67,7 +67,7 @@ __wt_block_compact_skip(WT_SESSION_IMPL *session, WT_BLOCK *block, int *skipp)
* worth doing. Ignore small files, and files where we are unlikely
* to recover 10% of the file.
*/
- if (fh->size <= 10 * 1024)
+ if (fh->size <= 10 * WT_MEGABYTE)
return (0);
__wt_spin_lock(session, &block->live_lock);
@@ -76,19 +76,25 @@ __wt_block_compact_skip(WT_SESSION_IMPL *session, WT_BLOCK *block, int *skipp)
WT_ERR(__block_dump_avail(session, block));
/* Sum the available bytes in the first 80% and 90% of the file. */
- avail_eighty = avail_ninety = 0;
+ avail_eighty = avail_ninety = avail_total = 0;
ninety = fh->size - fh->size / 10;
eighty = fh->size - ((fh->size / 10) * 2);
el = &block->live.avail;
- WT_EXT_FOREACH(ext, el->off)
+ WT_EXT_FOREACH(ext, el->off) {
+ avail_total += ext->size;
if (ext->off < ninety) {
avail_ninety += ext->size;
if (ext->off < eighty)
avail_eighty += ext->size;
}
+ }
WT_ERR(__wt_verbose(session, WT_VERB_COMPACT,
+ "%s: %" PRIuMAX "MB (%" PRIuMAX ") available space in the file",
+ block->name,
+ (uintmax_t)avail_total / WT_MEGABYTE, (uintmax_t)avail_total));
+ WT_ERR(__wt_verbose(session, WT_VERB_COMPACT,
"%s: %" PRIuMAX "MB (%" PRIuMAX ") available space in the first "
"80%% of the file",
block->name,
@@ -115,7 +121,7 @@ __wt_block_compact_skip(WT_SESSION_IMPL *session, WT_BLOCK *block, int *skipp)
* empty file can be processed quickly, so more aggressive compaction is
* less useful.
*/
- if (avail_ninety >= fh->size / 10) {
+ if (avail_total > WT_MEGABYTE && avail_ninety >= fh->size / 10) {
*skipp = 0;
block->compact_pct_tenths = 1;
if (avail_eighty >= ((fh->size / 10) * 2))
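
For reference, a minimal standalone sketch of the heuristic the block_compact.c hunks implement, using a plain array of free extents and illustrative names (free_ext, compact_skip, MEGABYTE) in place of WiredTiger's WT_EXTLIST/WT_BLOCK types; it is not the WiredTiger API, just the same arithmetic: skip files of 10MB or less, skip files with 1MB or less of free space, and otherwise require 10% of the file to be free in the first 90% (and compact more aggressively when 20% is free in the first 80%).

	#include <stddef.h>
	#include <stdint.h>

	#define MEGABYTE (1024 * 1024)

	struct free_ext {			/* Illustrative stand-in for a WT_EXT. */
		int64_t off;			/* Byte offset of the free extent. */
		int64_t size;			/* Length of the free extent in bytes. */
	};

	/*
	 * Return 1 if compaction should be skipped, 0 if it may be worthwhile.
	 * On a 0 return, *pct_tenthsp is set to how much of the file's tail
	 * (in tenths) the caller may try to migrate.
	 */
	static int
	compact_skip(int64_t file_size,
	    const struct free_ext *avail, size_t navail, int *pct_tenthsp)
	{
		int64_t avail_eighty, avail_ninety, avail_total, eighty, ninety;
		size_t i;

		/* Ignore small files: 10MB or less isn't worth the effort. */
		if (file_size <= 10 * MEGABYTE)
			return (1);

		ninety = file_size - file_size / 10;
		eighty = file_size - (file_size / 10) * 2;

		/* Sum the free bytes overall, and in the first 80% and 90%. */
		avail_eighty = avail_ninety = avail_total = 0;
		for (i = 0; i < navail; ++i) {
			avail_total += avail[i].size;
			if (avail[i].off < ninety) {
				avail_ninety += avail[i].size;
				if (avail[i].off < eighty)
					avail_eighty += avail[i].size;
			}
		}

		/*
		 * Require more than 1MB of free space in total and at least
		 * 10% of the file free in the first 90%; bump the target to
		 * two tenths when 20% of the file is free in the first 80%
		 * (mirroring the avail_eighty check above).
		 */
		if (avail_total > MEGABYTE && avail_ninety >= file_size / 10) {
			*pct_tenthsp =
			    avail_eighty >= (file_size / 10) * 2 ? 2 : 1;
			return (0);
		}
		return (1);
	}
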
diff --git a/src/btree/bt_compact.c b/src/btree/bt_compact.c
index 4709ac3260e..827ab271bb9 100644
--- a/src/btree/bt_compact.c
+++ b/src/btree/bt_compact.c
@@ -65,6 +65,34 @@ __compact_rewrite(WT_SESSION_IMPL *session, WT_REF *ref, int *skipp)
}
/*
+ * __compact_skip --
+ * Return if it may be worthwhile compacting a file.
+ */
+static int
+__compact_skip(WT_SESSION_IMPL *session, int *skipp)
+{
+ WT_BTREE *btree;
+ WT_PAGE_INDEX *pindex;
+
+ *skipp = 1;
+
+ btree = S2BT(session);
+
+ /*
+	 * It's safe to look at the pindex of the root page here. Trees with
+	 * fewer than 4 level-one pages aren't worth compacting.
+ */
+ pindex = WT_INTL_INDEX_GET_SAFE(btree->root.page);
+ if (pindex->entries < 4)
+ return (0);
+
+	/* Check with the block manager whether compaction may be useful. */
+ WT_RET(btree->bm->compact_skip(btree->bm, session, skipp));
+
+ return (0);
+}
+
+/*
* __wt_compact --
* Compact a file.
*/
@@ -93,7 +121,7 @@ __wt_compact(WT_SESSION_IMPL *session, const char *cfg[])
* to compact the data source if we make no progress, set a flag if the
* block layer thinks compaction is possible.
*/
- WT_RET(bm->compact_skip(bm, session, &skip));
+ WT_RET(__compact_skip(session, &skip));
if (skip)
return (0);
@@ -142,7 +170,6 @@ __wt_compact(WT_SESSION_IMPL *session, const char *cfg[])
block_manager_begin = 1;
/* Walk the tree reviewing pages to see if they should be re-written. */
- session->compaction = 1;
for (;;) {
/*
* Pages read for compaction aren't "useful"; don't update the
@@ -159,6 +186,7 @@ __wt_compact(WT_SESSION_IMPL *session, const char *cfg[])
if (skip)
continue;
+ session->compaction = 1;
/* Rewrite the page: mark the page and tree dirty. */
WT_ERR(__wt_page_modify_init(session, ref->page));
__wt_page_modify_set(session, ref->page);
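
Similarly, a sketch of the two-stage gate __compact_skip introduces in bt_compact.c, using hypothetical stand-in types (struct tree, tree_compact_skip) rather than WiredTiger's WT_BTREE and block-manager interfaces: the cheap tree-shape check runs first, and only when the root has enough level-one pages is the block manager's free-space estimate consulted.

	#include <stdint.h>

	struct tree {				/* Illustrative stand-in for a WT_BTREE. */
		uint32_t root_entries;		/* Level-one (root index) page count. */
						/* Block manager's skip estimate. */
		int (*bm_compact_skip)(struct tree *, int *skipp);
	};

	static int
	tree_compact_skip(struct tree *tree, int *skipp)
	{
		*skipp = 1;			/* Default to skipping. */

		/* Trees with fewer than 4 level-one pages aren't worth compacting. */
		if (tree->root_entries < 4)
			return (0);

		/* Otherwise defer to the block manager's free-space estimate. */
		return (tree->bm_compact_skip(tree, skipp));
	}
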