author     Michael Cahill <michael.cahill@wiredtiger.com>  2013-05-31 15:02:31 +1000
committer  Michael Cahill <michael.cahill@wiredtiger.com>  2013-05-31 15:02:31 +1000
commit     0ffee6c5f2a16538363878974f541a69102e00ac
tree       59b12f38f4f45196d818500aee484215a8366a0f
parent     756d8c6422953b70e1463255418d491267733f72
download   mongo-0ffee6c5f2a16538363878974f541a69102e00ac.tar.gz

imported patch realloc_def
 src/btree/bt_page.c       | 13
 src/btree/bt_slvg.c       | 20
 src/btree/rec_evict.c     |  6
 src/config/config_check.c |  6
 src/cursor/cur_backup.c   |  7
 src/include/misc.h        | 11
 src/include/txn.i         | 13
 src/lsm/lsm_merge.c       |  5
 src/lsm/lsm_meta.c        | 31
 src/lsm/lsm_tree.c        |  9
 src/meta/meta_ckpt.c      |  9
 src/schema/schema_open.c  |  7

12 files changed, 52 insertions(+), 85 deletions(-)
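Every converted call site below follows the same shape: an open-coded capacity check with its own hand-rolled growth step collapses into one __wt_realloc_def call that states how many items are needed. Schematically (a sketch of the pattern, not compilable on its own; ELEM, used, allocated_bytes and array are placeholder names, with the +1000 step taken from the salvage hunks):

	/* Before: each caller sizes in bytes and picks its own growth step. */
	if (used * sizeof(ELEM) == allocated_bytes)
		WT_RET(__wt_realloc(session, &allocated_bytes,
		    (used + 1000) * sizeof(ELEM), &array));

	/* After: the caller states an item count; sizing is centralized. */
	WT_RET(__wt_realloc_def(session, &allocated_bytes, used + 1, &array));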
diff --git a/src/btree/bt_page.c b/src/btree/bt_page.c
index 5e43c53d6cd..711bb5955c8 100644
--- a/src/btree/bt_page.c
+++ b/src/btree/bt_page.c
@@ -375,13 +375,13 @@ __inmem_col_var(WT_SESSION_IMPL *session, WT_PAGE *page, size_t *sizep)
 	WT_PAGE_HEADER *dsk;
 	uint64_t recno, rle;
 	size_t bytes_allocated;
-	uint32_t i, indx, max_repeats, nrepeats;
+	uint32_t i, indx, nrepeats;
 
 	btree = S2BT(session);
 	dsk = page->dsk;
 	unpack = &_unpack;
 	repeats = NULL;
-	bytes_allocated = max_repeats = nrepeats = 0;
+	bytes_allocated = nrepeats = 0;
 	recno = page->u.col_var.recno;
 
 	/*
@@ -401,13 +401,8 @@ __inmem_col_var(WT_SESSION_IMPL *session, WT_PAGE *page, size_t *sizep)
 	 */
 	rle = __wt_cell_rle(unpack);
 	if (rle > 1) {
-		if (nrepeats == max_repeats) {
-			max_repeats = (max_repeats == 0) ?
-			    10 : 2 * max_repeats;
-			WT_RET(__wt_realloc(session, &bytes_allocated,
-			    max_repeats * sizeof(WT_COL_RLE),
-			    &repeats));
-		}
+		WT_RET(__wt_realloc_def(session, &bytes_allocated,
+		    nrepeats + 1, &repeats));
 		repeats[nrepeats].indx = indx;
 		repeats[nrepeats].recno = recno;
 		repeats[nrepeats++].rle = rle;
diff --git a/src/btree/bt_slvg.c b/src/btree/bt_slvg.c
index d13be782394..79c74a5f0ad 100644
--- a/src/btree/bt_slvg.c
+++ b/src/btree/bt_slvg.c
@@ -476,9 +476,8 @@ __slvg_trk_leaf(WT_SESSION_IMPL *session,
 	trk = NULL;
 
 	/* Re-allocate the array of pages, as necessary. */
-	if (ss->pages_next * sizeof(WT_TRACK *) == ss->pages_allocated)
-		WT_RET(__wt_realloc(session, &ss->pages_allocated,
-		    (ss->pages_next + 1000) * sizeof(WT_TRACK *), &ss->pages));
+	WT_RET(__wt_realloc_def(
+	    session, &ss->pages_allocated, ss->pages_next + 1, &ss->pages));
 
 	/* Allocate a WT_TRACK entry for this new page and fill it in. */
 	WT_RET(__slvg_trk_init(
@@ -585,9 +584,8 @@ __slvg_trk_ovfl(WT_SESSION_IMPL *session,
 	 * Reallocate the overflow page array as necessary, then save the
 	 * page's location information.
 	 */
-	if (ss->ovfl_next * sizeof(WT_TRACK *) == ss->ovfl_allocated)
-		WT_RET(__wt_realloc(session, &ss->ovfl_allocated,
-		    (ss->ovfl_next + 1000) * sizeof(WT_TRACK *), &ss->ovfl));
+	WT_RET(__wt_realloc_def(
+	    session, &ss->ovfl_allocated, ss->ovfl_next + 1, &ss->ovfl));
 
 	WT_RET(__slvg_trk_init(
 	    session, addr, size, dsk->mem_size, dsk->write_gen, ss, &trk));
@@ -933,9 +931,8 @@ delete:	WT_RET(__slvg_trk_free(session,
 	 * the new element into the array after the existing element (that's
 	 * probably wrong, but we'll fix it up in a second).
 	 */
-	if (ss->pages_next * sizeof(WT_TRACK *) == ss->pages_allocated)
-		WT_RET(__wt_realloc(session, &ss->pages_allocated,
-		    (ss->pages_next + 1000) * sizeof(WT_TRACK *), &ss->pages));
+	WT_RET(__wt_realloc_def(
+	    session, &ss->pages_allocated, ss->pages_next + 1, &ss->pages));
 	memmove(ss->pages + a_slot + 1, ss->pages + a_slot,
 	    (ss->pages_next - a_slot) * sizeof(*ss->pages));
 	ss->pages[a_slot + 1] = new;
@@ -1503,9 +1500,8 @@ delete:	WT_RET(__slvg_trk_free(session,
 	 * the new element into the array after the existing element (that's
 	 * probably wrong, but we'll fix it up in a second).
	 */
-	if (ss->pages_next * sizeof(WT_TRACK *) == ss->pages_allocated)
-		WT_RET(__wt_realloc(session, &ss->pages_allocated,
-		    (ss->pages_next + 1000) * sizeof(WT_TRACK *), &ss->pages));
+	WT_RET(__wt_realloc_def(
+	    session, &ss->pages_allocated, ss->pages_next + 1, &ss->pages));
 	memmove(ss->pages + a_slot + 1, ss->pages + a_slot,
 	    (ss->pages_next - a_slot) * sizeof(*ss->pages));
 	ss->pages[a_slot + 1] = new;
diff --git a/src/btree/rec_evict.c b/src/btree/rec_evict.c
index 40eaaf5c946..d42361c6974 100644
--- a/src/btree/rec_evict.c
+++ b/src/btree/rec_evict.c
@@ -504,10 +504,8 @@ __hazard_exclusive(WT_SESSION_IMPL *session, WT_REF *ref, int top)
 	 * Make sure there is space to track exclusive access so we can unlock
 	 * to clean up.
 	 */
-	if (session->excl_next * sizeof(WT_REF *) == session->excl_allocated)
-		WT_RET(__wt_realloc(session, &session->excl_allocated,
-		    (session->excl_next + 50) * sizeof(WT_REF *),
-		    &session->excl));
+	WT_RET(__wt_realloc_def(session, &session->excl_allocated,
+	    session->excl_next + 1, &session->excl));
 
 	/*
 	 * Hazard pointers are acquired down the tree, which means we can't
diff --git a/src/config/config_check.c b/src/config/config_check.c
index 4ca6b3c9717..7ebb95eec33 100644
--- a/src/config/config_check.c
+++ b/src/config/config_check.c
@@ -35,10 +35,8 @@ __conn_foc_add(WT_SESSION_IMPL *session, const void *p)
 	 *
 	 * Our caller is expected to be holding any locks we need.
 	 */
-	if ((conn->foc_cnt + 1) * sizeof(void *) > conn->foc_size)
-		WT_RET(__wt_realloc(session, &conn->foc_size,
-		    WT_MAX(20 * sizeof(void *), 2 * conn->foc_size),
-		    &conn->foc));
+	WT_RET(__wt_realloc_def(
+	    session, &conn->foc_size, conn->foc_cnt + 1, &conn->foc));
 
 	conn->foc[conn->foc_cnt++] = (void *)p;
 	return (0);
diff --git a/src/cursor/cur_backup.c b/src/cursor/cur_backup.c
index 845213b9ae1..f011b4ce893 100644
--- a/src/cursor/cur_backup.c
+++ b/src/cursor/cur_backup.c
@@ -443,11 +443,8 @@ __backup_list_append(
 	int need_handle;
 
 	/* Leave a NULL at the end to mark the end of the list. */
-	if ((cb->list_next + 1) *
-	    sizeof(WT_CURSOR_BACKUP_ENTRY) >= cb->list_allocated)
-		WT_RET(__wt_realloc(session, &cb->list_allocated,
-		    (cb->list_next + 100) *
-		    sizeof(WT_CURSOR_BACKUP_ENTRY *), &cb->list));
+	WT_RET(__wt_realloc_def(session, &cb->list_allocated,
+	    cb->list_next + 2, &cb->list));
 	p = &cb->list[cb->list_next];
 	p[0].name = p[1].name = NULL;
 	p[0].handle = p[1].handle = NULL;
diff --git a/src/include/misc.h b/src/include/misc.h
index 1d22922ac0c..ed7549805c7 100644
--- a/src/include/misc.h
+++ b/src/include/misc.h
@@ -78,6 +78,17 @@
  */
 #define	__wt_calloc_def(session, number, addr)				\
 	__wt_calloc(session, (size_t)(number), sizeof(**(addr)), addr)
+
+/*
+ * __wt_realloc_def --
+ *	Common case allocate-and-grow function.
+ *	Starts by allocating the requested number of items (at least 10), then
+ *	doubles each time the list needs to grow.
+ */
+#define	__wt_realloc_def(session, sizep, number, addr)			\
+	(((number) * sizeof(**(addr)) <= *(sizep)) ? 0 :		\
+	    __wt_realloc(session, sizep, WT_MAX(*(sizep) * 2,		\
+	    WT_MAX(10, (number)) * sizeof(**(addr))), addr))
 
 /*
 * Our internal free function clears the underlying address atomically so there
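The macro's growth policy is easy to model outside the tree. The stand-alone sketch below assumes only the C standard library: grow_def and its demo driver are hypothetical names, plain realloc() stands in for __wt_realloc(), and the memset models __wt_realloc clearing the newly grown tail.

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define	GROW_MAX(a, b) ((a) > (b) ? (a) : (b))

	/*
	 * grow_def --
	 *	Model of __wt_realloc_def: grow *addrp to hold at least
	 * "number" items of "item_size" bytes, tracking bytes in *sizep.
	 */
	static int
	grow_def(size_t *sizep, size_t number, size_t item_size, void *addrp)
	{
		void **p = addrp;
		size_t bytes;
		void *newp;

		if (number * item_size <= *sizep)	/* Big enough already. */
			return (0);

		/* At least 10 items up front, then double the byte count. */
		bytes = GROW_MAX(*sizep * 2, GROW_MAX(10, number) * item_size);
		if ((newp = realloc(*p, bytes)) == NULL)
			return (ENOMEM);
		memset((char *)newp + *sizep, 0, bytes - *sizep);
		*p = newp;
		*sizep = bytes;
		return (0);
	}

	int
	main(void)
	{
		size_t allocated, i, last;
		int *numbers;

		allocated = last = 0;
		numbers = NULL;
		for (i = 0; i < 100; ++i) {
			if (grow_def(&allocated,
			    i + 1, sizeof(*numbers), &numbers) != 0)
				return (1);
			numbers[i] = (int)i;
			if (allocated != last) {	/* Report each growth. */
				printf("item %zu: %zu bytes, %zu items\n",
				    i, allocated,
				    allocated / sizeof(*numbers));
				last = allocated;
			}
		}
		free(numbers);
		return (0);
	}

With 4-byte ints the demo grows at items 0, 10, 20, 40 and 80, to 40, 80, 160, 320 and 640 bytes. Doubling keeps the total bytes copied linear in the final array size, where a fixed increment like the retired +50, +100 or +1000 element steps is quadratic in the worst case.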
diff --git a/src/include/txn.i b/src/include/txn.i
index 4e597853694..bafc33f2b64 100644
--- a/src/include/txn.i
+++ b/src/include/txn.i
@@ -22,10 +22,8 @@ __wt_txn_modify(WT_SESSION_IMPL *session, wt_txnid_t *id)
 	txn = &session->txn;
 	WT_ASSERT(session, F_ISSET(txn, TXN_RUNNING));
 
-	if (txn->mod_count * sizeof(wt_txnid_t *) == txn->mod_alloc)
-		WT_RET(__wt_realloc(session, &txn->mod_alloc,
-		    WT_MAX(10, 2 * txn->mod_count) *
-		    sizeof(wt_txnid_t *), &txn->mod));
+	WT_RET(__wt_realloc_def(
+	    session, &txn->mod_alloc, txn->mod_count + 1, &txn->mod));
 	txn->mod[txn->mod_count++] = id;
 
 	*id = txn->id;
@@ -46,11 +44,8 @@ __wt_txn_modify_ref(WT_SESSION_IMPL *session, WT_REF *ref)
 	txn = &session->txn;
 	WT_ASSERT(session, F_ISSET(txn, TXN_RUNNING));
 
-	if (txn->modref_count *
-	    sizeof(WT_REF *) == txn->modref_alloc)
-		WT_RET(__wt_realloc(session, &txn->modref_alloc,
-		    WT_MAX(10, 2 * txn->modref_count) *
-		    sizeof(WT_REF *), &txn->modref));
+	WT_RET(__wt_realloc_def(
+	    session, &txn->modref_alloc, txn->modref_count + 1, &txn->modref));
 	txn->modref[txn->modref_count++] = ref;
 
 	ref->txnid = txn->id;
diff --git a/src/lsm/lsm_merge.c b/src/lsm/lsm_merge.c
index f3af66a235b..975e4c9f599 100644
--- a/src/lsm/lsm_merge.c
+++ b/src/lsm/lsm_merge.c
@@ -25,9 +25,8 @@ __wt_lsm_merge_update_tree(WT_SESSION_IMPL *session,
 	/* Setup the array of obsolete chunks. */
 	if (nchunks > lsm_tree->old_avail) {
 		chunk_sz = sizeof(*lsm_tree->old_chunks);
-		WT_RET(__wt_realloc(session,
-		    &lsm_tree->old_alloc,
-		    chunk_sz * WT_MAX(10, lsm_tree->nold_chunks + 2 * nchunks),
+		WT_RET(__wt_realloc_def(session, &lsm_tree->old_alloc,
+		    lsm_tree->nold_chunks - lsm_tree->old_avail + nchunks,
 		    &lsm_tree->old_chunks));
 		lsm_tree->old_avail += (u_int)(lsm_tree->old_alloc /
 		    chunk_sz) - lsm_tree->nold_chunks;
diff --git a/src/lsm/lsm_meta.c b/src/lsm/lsm_meta.c
index 06ae500d624..cbc35345107 100644
--- a/src/lsm/lsm_meta.c
+++ b/src/lsm/lsm_meta.c
@@ -91,13 +91,9 @@ __wt_lsm_meta_read(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
 		for (nchunks = 0;
 		    (ret = __wt_config_next(&lparser, &lk, &lv)) == 0; ) {
 			if (WT_STRING_MATCH("id", lk.str, lk.len)) {
-				if ((nchunks + 1) * chunk_sz >
-				    lsm_tree->chunk_alloc)
-					WT_ERR(__wt_realloc(session,
-					    &lsm_tree->chunk_alloc,
-					    WT_MAX(10 * chunk_sz,
-					    2 * lsm_tree->chunk_alloc),
-					    &lsm_tree->chunk));
+				WT_ERR(__wt_realloc_def(session,
+				    &lsm_tree->chunk_alloc,
+				    nchunks + 1, &lsm_tree->chunk));
 				WT_ERR(__wt_calloc_def(
 				    session, 1, &chunk));
 				lsm_tree->chunk[nchunks++] = chunk;
@@ -139,25 +135,18 @@ __wt_lsm_meta_read(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
 				F_SET(chunk, WT_LSM_CHUNK_BLOOM);
 				continue;
 			}
-			if ((nchunks + 1) * chunk_sz >
-			    lsm_tree->old_avail * chunk_sz) {
-				WT_ERR(__wt_realloc(session,
-				    &lsm_tree->old_alloc,
-				    chunk_sz * WT_MAX(10,
-				    lsm_tree->nold_chunks +
-				    2 * nchunks),
-				    &lsm_tree->old_chunks));
-				lsm_tree->nold_chunks = (u_int)
-				    (lsm_tree->old_alloc / chunk_sz);
-				lsm_tree->old_avail =
-				    lsm_tree->nold_chunks - nchunks;
-			}
+			WT_ERR(__wt_realloc_def(session,
+			    &lsm_tree->old_alloc, nchunks + 1,
+			    &lsm_tree->old_chunks));
+			lsm_tree->nold_chunks =
+			    (u_int)(lsm_tree->old_alloc / chunk_sz);
 			WT_ERR(__wt_calloc_def(session, 1, &chunk));
 			lsm_tree->old_chunks[nchunks++] = chunk;
 			WT_ERR(__wt_strndup(session, lk.str, lk.len,
 			    &chunk->uri));
 			F_SET(chunk, WT_LSM_CHUNK_ONDISK);
-			--lsm_tree->old_avail;
+			lsm_tree->old_avail =
+			    lsm_tree->nold_chunks - nchunks;
 		}
 		WT_ERR_NOTFOUND_OK(ret);
 		lsm_tree->nold_chunks = nchunks;
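The LSM hunks above are the only conversions that also rework bookkeeping, because the tree tracks free slots (old_avail) alongside the allocated byte count. Worked through with illustrative numbers: if nold_chunks is 10 with old_avail = 2 free slots and a merge retires nchunks = 5 chunks, lsm_merge.c asks __wt_realloc_def for 10 - 2 + 5 = 13 slots; the macro doubles the 10-slot allocation to 20 slots, and old_avail then rises by the ten new slots to 12.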
diff --git a/src/lsm/lsm_tree.c b/src/lsm/lsm_tree.c
index 9a92cceae3e..68e2d012104 100644
--- a/src/lsm/lsm_tree.c
+++ b/src/lsm/lsm_tree.c
@@ -561,13 +561,8 @@ __wt_lsm_tree_switch(
 	cache_sz = S2C(session)->cache_size;
 
 	new_id = WT_ATOMIC_ADD(lsm_tree->last, 1);
-	if ((lsm_tree->nchunks + 1) * sizeof(*lsm_tree->chunk) >
-	    lsm_tree->chunk_alloc)
-		WT_ERR(__wt_realloc(session,
-		    &lsm_tree->chunk_alloc,
-		    WT_MAX(10 * sizeof(*lsm_tree->chunk),
-		    2 * lsm_tree->chunk_alloc),
-		    &lsm_tree->chunk));
+	WT_ERR(__wt_realloc_def(session, &lsm_tree->chunk_alloc,
+	    lsm_tree->nchunks + 1, &lsm_tree->chunk));
 
 	/*
 	 * In the steady state, we expect that the checkpoint worker thread
diff --git a/src/meta/meta_ckpt.c b/src/meta/meta_ckpt.c
index 22b620dbd7f..251fa5ade87 100644
--- a/src/meta/meta_ckpt.c
+++ b/src/meta/meta_ckpt.c
@@ -270,9 +270,8 @@ __wt_meta_ckptlist_get(
 	if (__wt_config_getones(session, config, "checkpoint", &v) == 0 &&
 	    __wt_config_subinit(session, &ckptconf, &v) == 0)
 		for (; __wt_config_next(&ckptconf, &k, &v) == 0; ++slot) {
-			if (slot * sizeof(WT_CKPT) == allocated)
-				WT_ERR(__wt_realloc(session, &allocated,
-				    (slot + 50) * sizeof(WT_CKPT), &ckptbase));
+			WT_ERR(__wt_realloc_def(
+			    session, &allocated, slot + 1, &ckptbase));
 			ckpt = &ckptbase[slot];
 			WT_ERR(__ckpt_load(session, &k, &v, ckpt));
 		}
@@ -288,9 +287,7 @@ __wt_meta_ckptlist_get(
 	 * checkpoint).  All of that cooperation is handled in the WT_CKPT
 	 * structure referenced from the WT_BTREE structure.
 	 */
-	if ((slot + 2) * sizeof(WT_CKPT) > allocated)
-		WT_ERR(__wt_realloc(session, &allocated,
-		    (slot + 2) * sizeof(WT_CKPT), &ckptbase));
+	WT_ERR(__wt_realloc_def(session, &allocated, slot + 2, &ckptbase));
 
 	/* Sort in creation-order. */
 	qsort(ckptbase, slot, sizeof(WT_CKPT), __ckpt_compare_order);
diff --git a/src/schema/schema_open.c b/src/schema/schema_open.c
index c95ae0b97d8..7d39182765c 100644
--- a/src/schema/schema_open.c
+++ b/src/schema/schema_open.c
@@ -249,11 +249,8 @@ __wt_schema_open_index(WT_SESSION_IMPL *session,
 		 * Ensure there is space, including if we have to make room for
 		 * a new entry in the middle of the list.
 		 */
-		if (table->idx_alloc <= sizeof(WT_INDEX *) *
-		    ((size_t)WT_MAX(i, table->nindices) + 1))
-			WT_ERR(__wt_realloc(session, &table->idx_alloc,
-			    WT_MAX(10 * sizeof(WT_INDEX *),
-			    2 * table->idx_alloc), &table->indices));
+		WT_ERR(__wt_realloc_def(session, &table->idx_alloc,
+		    WT_MAX(i, table->nindices) + 1, &table->indices));
 
 		/* Keep the in-memory list in sync with the metadata. */
 		cmp = 0;
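Two call sites ask for more than one extra item. cur_backup.c requests list_next + 2 because, per its own comment, the list is NULL-terminated, so an append must also leave room for the sentinel entry. meta_ckpt.c requests slot + 2 for what appears to be the same reason: room for one more checkpoint plus the empty entry that ends the WT_CKPT list (an inference from the surrounding comment, not stated in the patch itself).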